Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00

Compare commits: 223 commits
version3.5 ... hongyi-zha
Commit SHA1s (223):

e8c17a099e 3f36cfea38 f889ef7625 a93bf4410d 1c0764753a c847209ac9 4f9d40c14f 91926d24b7
ef311c4859 82795d3817 49e28a5a00 01def2e329 2291be2b28 c89ec7969f 1506c19834 a6fdc493b7
113067c6ab 7b6828ab07 d818c38dfe 08b4e9796e b55d573819 06b0e800a2 7bbaf05961 3b83279855
37164a826e dd2a97e7a9 e579006c4a 031f19b6dd 142b516749 f2e73aa580 8565a35cf7 72d78eb150
7aeda537ac 6cea17d4b7 20bc51d747 b8ebefa427 dcc9326f0b 94fc396eb9 e594e1b928 8fe545d97b
6f978fa72e 19be471aa8 38956934fd 32439e14b5 317389bf4b 2c740fc641 96832a8228 361557da3c
5f18d4a1af 0d10bc570f 3ce7d9347d 8a78d7b89f 0e43b08837 74bced2d35 961a24846f b7e4744f28
71adc40901 a2099f1622 c0a697f6c8 bdde1d2fd7 63373ab3b6 fb6566adde 9f2ef9ec49 35c1aa21e4
627d739720 37f15185b6 9643e1c25f 28eae2f80e 7ab379688e 3d4c6f54f1 1714116a89 2bc65a99ca
0a2805513e d698b96209 6b1c6f0bf7 c22867b74c 2abe665521 b0e6c4d365 d883c7f34b aba871342f
37744a9cb1 480516380d 60ba712131 a7c960dcb0 a96f842b3a 417ca91e23 ef8fadfa18 865c4ca993
31304f481a 1bd3637d32 160a683667 49ca03ca06 c625348ce1 6d4a74893a 5c7499cada f522691529
ca85573ec1 2c7bba5c63 e22f0226d5 0f250305b4 7606f5c130 4f0dcc431c 6ca0dd2f9e e3e9921f6b
867ddd355e bb431db7d3 43568b83e1 2b90302851 f7588d4776 a0bfa7ba1c c60a7452bf 68a49d3758
ac3d4cf073 9479dd984c 3c271302cc 6e9936531d 439147e4b7 8d13821099 49fe06ed69 7882ce7304
dc68e601a5 d169fb4b16 36e19d5202 c5f1e4e392 d3f7267a63 f4127a9c9c c181ad38b4 107944f5b7
8c7569b689 fa374bf1fc c0a36e37be 2f2b869efd 2f148bada0 916b2e8aa7 0cb7dd5280 892ccb14c7
21bccf69d2 7bac8f4bd3 d0c2923ab1 8a6e96c369 49f3fcf2c0 2b96a60b76 ec60a85cac 647d9f88db
b0c627909a 102bf2f1eb 26291b33d1 4f04d810b7 6d2f126253 e5b296d221 7933675c12 692ff4b59c
4d8b535c79 3c03f240ba 9bfc3400f9 95504f0bb7 0cd3274d04 2cef81abbe 6f9bc5d206 94ab41d3c0
da376068e1 552219fd5a 4985986243 d99b443b4c 2aab6cb708 1134723c80 6126024f2c ef12d4f754
e8dd3c02f2 e7f4c804eb 3d6ee5c755 d8958da8cd a64d550045 d876a81e78 6723eb77b2 86891e3535
2f805db35d ecaf2bdf45 22e00eb1c5 900fad69cf 55d807c116 9a0ed248ca 88802b0f72 5720ac127c
f44642d9d2 29775dedd8 6417ca9dde f417c1ce6d e4c057f5a3 f9e9b6f4ec c141e767c6 17f361d63b
8780fe29f1 d57bb8afbe d39945c415 688df6aa24 b24fef8a61 8c840f3d4c 577d3d566b fd92766083
2d2e02040d aee57364dd 7ca37c4831 5b06a6cae5 5d5695cd9a fd72894c90 c1abec2e4b 9916f59753
e6716ccf63 e533ed6d12 4fefbb80ac 1253a2b0a6 71537b570f 203d5f7296 7754215dad b470af7c7b
f8c5f9045d c7a0a5f207 b1be05009b cdca36f5d2 6ed88fe848 ea4e03b1d8 aa341fd268
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 6 changes

@@ -69,9 +69,3 @@ body:
   attributes:
     label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
     description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
-
-
-
-
-
-
.github/ISSUE_TEMPLATE/feature_request.yml (vendored): 5 changes

@@ -21,8 +21,3 @@ body:
   attributes:
     label: Feature Request | 功能请求
     description: Feature Request | 功能请求
-
-
-
-
-
.github/workflows/build-with-all-capacity-beta.yml (vendored, new file): 44 additions

@@ -0,0 +1,44 @@
+# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
+name: build-with-all-capacity-beta
+
+on:
+  push:
+    branches:
+      - 'master'
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}_with_all_capacity_beta
+
+jobs:
+  build-and-push-image:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Log in to the Container registry
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          push: true
+          file: docs/GithubAction+AllCapacityBeta
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
.gitignore (vendored): 1 change

@@ -152,3 +152,4 @@ request_llms/moss
 media
 flagged
 request_llms/ChatGLM-6b-onnx-u8s8
+.pre-commit-config.yaml
.pre-commit-config.yaml (deleted): 10 deletions

@@ -1,10 +0,0 @@
-# See https://pre-commit.com for more information
-# See https://pre-commit.com/hooks.html for more hooks
-repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v3.2.0
-  hooks:
-  - id: trailing-whitespace
-  - id: end-of-file-fixer
-  - id: check-yaml
-  - id: check-added-large-files
Dockerfile

@@ -18,7 +18,6 @@ WORKDIR /gpt
 
 # 安装大部分依赖,利用Docker缓存加速以后的构建 (以下三行,可以删除)
 COPY requirements.txt ./
-COPY ./docs/gradio-3.32.6-py3-none-any.whl ./docs/gradio-3.32.6-py3-none-any.whl
 RUN pip3 install -r requirements.txt
 
 
README.md: 248 changes

@@ -1,69 +1,94 @@
-> **Note**
+> [!IMPORTANT]
->
+> 2024.1.18: 更新3.70版本,支持Mermaid绘图库(让大模型绘制脑图)
-> 2023.11.12: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
+> 2024.1.17: 恭迎GLM4,全力支持Qwen、GLM、DeepseekCoder等国内中文大语言基座模型!
->
+> 2024.1.17: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
-> 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。
+> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。
 
+<br>
 
+<div align=center>
+<h1 aligh="center">
+<img src="docs/logo.png" width="40"> GPT 学术优化 (GPT Academic)
+</h1>
 
+[![Github][Github-image]][Github-url]
+[![License][License-image]][License-url]
+[![Releases][Releases-image]][Releases-url]
+[![Installation][Installation-image]][Installation-url]
+[![Wiki][Wiki-image]][Wiki-url]
+[![PR][PRs-image]][PRs-url]
 
+[Github-image]: https://img.shields.io/badge/github-12100E.svg?style=flat-square
+[License-image]: https://img.shields.io/github/license/binary-husky/gpt_academic?label=License&style=flat-square&color=orange
+[Releases-image]: https://img.shields.io/github/release/binary-husky/gpt_academic?label=Release&style=flat-square&color=blue
+[Installation-image]: https://img.shields.io/badge/dynamic/json?color=blue&url=https://raw.githubusercontent.com/binary-husky/gpt_academic/master/version&query=$.version&label=Installation&style=flat-square
+[Wiki-image]: https://img.shields.io/badge/wiki-项目文档-black?style=flat-square
+[PRs-image]: https://img.shields.io/badge/PRs-welcome-pink?style=flat-square
 
+[Github-url]: https://github.com/binary-husky/gpt_academic
+[License-url]: https://github.com/binary-husky/gpt_academic/blob/master/LICENSE
+[Releases-url]: https://github.com/binary-husky/gpt_academic/releases
+[Installation-url]: https://github.com/binary-husky/gpt_academic#installation
+[Wiki-url]: https://github.com/binary-husky/gpt_academic/wiki
+[PRs-url]: https://github.com/binary-husky/gpt_academic/pulls
 
 
+</div>
-# <div align=center><img src="docs/logo.png" width="40"> GPT 学术优化 (GPT Academic)</div>
+<br>
 
 **如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或插件,欢迎发pull requests!**
 
-If you like this project, please give it a Star. We also have a README in [English|](docs/README.English.md)[日本語|](docs/README.Japanese.md)[한국어|](docs/README.Korean.md)[Русский|](docs/README.Russian.md)[Français](docs/README.French.md) translated by this project itself.
+If you like this project, please give it a Star.
-To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
+Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
+<br>
 
-> **Note**
+> [!NOTE]
+> 1.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。
+> [](#installation) [](https://github.com/binary-husky/gpt_academic/releases) [](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) []([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki))
 >
-> 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。
+> 2.本项目兼容并鼓励尝试国内中文大语言基座模型如通义千问,智谱GLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。
->
-> 2.本项目中每个文件的功能都在[自译解报告`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)。[常规安装方法](#installation) | [一键安装脚本](https://github.com/binary-husky/gpt_academic/releases) | [配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
->
-> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。
 
 
 
+<br><br>
 
 <div align="center">
 
 功能(⭐= 近期新增功能) | 描述
 --- | ---
-⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, [通义千问](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),智谱API,DALLE3
+⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱GLM4](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/)
+⭐支持mermaid图像渲染 | 支持让GPT生成[流程图](https://www.bilibili.com/video/BV18c41147H9/)、状态转移图、甘特图、饼状图、GitGraph等等(3.7版本)
+⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
+⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机
+⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能!
+⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件
 润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码
 [自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键
 模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [插件] 一键可以剖析Python/C/C++/Java/Lua/...项目树 或 [自我剖析](https://www.bilibili.com/video/BV1cj411A7VW)
+[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [插件] 一键剖析Python/C/C++/Java/Lua/...项目树 或 [自我剖析](https://www.bilibili.com/video/BV1cj411A7VW)
 读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [插件] 一键解读latex/pdf论文全文并生成摘要
 Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文
 批量注释生成 | [插件] 一键批量生成函数注释
-Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?
+Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?就是出自他的手笔
-chat分析报告生成 | [插件] 运行后自动生成总结汇报
 [PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程)
 [Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
 Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF
 [谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
 互联网信息聚合+GPT | [插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时
-⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
-⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机
 公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮
-⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能!
 启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
-[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧?
+[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)伺候的感觉一定会很不错吧?
-⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件
 更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/)
 ⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中)
-⭐虚空终端插件 | [插件] 用自然语言,直接调度本项目其他插件
 更多新功能展示 (图像生成等) …… | 见本文档结尾处 ……
 </div>
 
 
 - 新界面(修改`config.py`中的LAYOUT选项即可实现“左右布局”和“上下布局”的切换)
 <div align="center">
-<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/d81137c3-affd-4cd1-bb5e-b15610389762" width="700" >
+<img src="https://user-images.githubusercontent.com/96192199/279702205-d81137c3-affd-4cd1-bb5e-b15610389762.gif" width="700" >
 </div>
 
 
-- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放粘贴板
+- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放剪贴板
 <div align="center">
 <img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
 </div>
@@ -73,57 +98,80 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
 <img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
 </div>
 
-- 如果输出包含公式,会同时以tex形式和渲染形式显示,方便复制和阅读
+- 如果输出包含公式,会以tex形式和渲染形式同时显示,方便复制和阅读
 <div align="center">
 <img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
 </div>
 
-- 懒得看项目代码?整个工程直接给chatgpt炫嘴里
+- 懒得看项目代码?直接把整个工程炫ChatGPT嘴里
 <div align="center">
 <img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
 </div>
 
-- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
+- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + GPT4)
 <div align="center">
 <img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
 </div>
 
+<br><br>
 
 # Installation
 
+```mermaid
+flowchart TD
+    A{"安装方法"} --> W1("I. 🔑直接运行 (Windows, Linux or MacOS)")
+    W1 --> W11["1. Python pip包管理依赖"]
+    W1 --> W12["2. Anaconda包管理依赖(推荐⭐)"]
+
+    A --> W2["II. 🐳使用Docker (Windows, Linux or MacOS)"]
+
+    W2 --> k1["1. 部署项目全部能力的大镜像(推荐⭐)"]
+    W2 --> k2["2. 仅在线模型(GPT, GLM4等)镜像"]
+    W2 --> k3["3. 在线模型 + Latex的大镜像"]
+
+    A --> W4["IV. 🚀其他部署方法"]
+    W4 --> C1["1. Windows/MacOS 一键安装运行脚本(推荐⭐)"]
+    W4 --> C2["2. Huggingface, Sealos远程部署"]
+    W4 --> C4["3. ... 其他 ..."]
+```
 
 ### 安装方法I:直接运行 (Windows, Linux or MacOS)
 
 1. 下载项目
-```sh
-git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
-cd gpt_academic
-```
 
-2. 配置API_KEY
+```sh
+git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
+cd gpt_academic
+```
 
-在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
+2. 配置API_KEY等变量
 
-「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解该读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中(仅复制您修改过的配置条目即可)。 」
+在`config.py`中,配置API KEY等变量。[特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1)、[Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
 
-「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py`。 」
+「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,从而确保自动更新时不会丢失配置 」。
 
+「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py` 」。
 
 
 3. 安装依赖
 ```sh
 # (选择I: 如熟悉python, python推荐版本 3.9 ~ 3.11)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
 python -m pip install -r requirements.txt
 
 # (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr):
 conda create -n gptac_venv python=3.11 # 创建anaconda环境
 conda activate gptac_venv # 激活anaconda环境
 python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤
 ```
 
 
 <details><summary>如果需要支持清华ChatGLM2/复旦MOSS/RWKV作为后端,请点击展开此处</summary>
 <p>
 
-【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
+【可选步骤】如果需要支持清华ChatGLM3/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
 
 ```sh
-# 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+# 【可选步骤I】支持清华ChatGLM3。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
 python -m pip install -r request_llms/requirements_chatglm.txt
 
 # 【可选步骤II】支持复旦MOSS
@@ -135,6 +183,14 @@ git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss #
 
 # 【可选步骤IV】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案):
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 
+# 【可选步骤V】支持本地模型INT8,INT4量化(这里所指的模型本身不是量化版本,目前deepseek-coder支持,后面测试后会加入更多模型量化选择)
+pip install bitsandbyte
+# windows用户安装bitsandbytes需要使用下面bitsandbytes-windows-webui
+python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui
+pip install -U git+https://github.com/huggingface/transformers.git
+pip install -U git+https://github.com/huggingface/accelerate.git
+pip install peft
 ```
 
 </p>
@@ -143,62 +199,64 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-
 
 
 4. 运行
 ```sh
 python main.py
 ```
 
 ### 安装方法II:使用Docker
 
-0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐使用这个)
+0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐该方法部署完整项目)
 [](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
 
 ``` sh
 # 修改docker-compose.yml,保留方案0并删除其他方案。然后运行:
 docker-compose up
 ```
 
-1. 仅ChatGPT+文心一言+spark等在线模型(推荐大多数人选择)
+1. 仅ChatGPT + GLM4 + 文心一言+spark等在线模型(推荐大多数人选择)
 [](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
 [](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
 [](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
 
 ``` sh
 # 修改docker-compose.yml,保留方案1并删除其他方案。然后运行:
 docker-compose up
 ```
 
 P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。
 
-2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
+2. ChatGPT + GLM3 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
 [](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
 
 ``` sh
 # 修改docker-compose.yml,保留方案2并删除其他方案。然后运行:
 docker-compose up
 ```
 
 
-### 安装方法III:其他部署姿势
+### 安装方法III:其他部署方法
 1. **Windows一键运行脚本**。
-完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。
-脚本的贡献来源是[oobabooga](https://github.com/oobabooga/one-click-installers)。
+完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。脚本贡献来源:[oobabooga](https://github.com/oobabooga/one-click-installers)。
 
 2. 使用第三方API、Azure等、文心一言、星火等,见[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)
 
 3. 云服务器远程部署避坑指南。
 请访问[云服务器远程部署wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
 
-4. 一些新型的部署平台或方法
+4. 在其他平台部署&二级网址部署
 - 使用Sealos[一键部署](https://github.com/binary-husky/gpt_academic/issues/993)。
 - 使用WSL2(Windows Subsystem for Linux 子系统)。请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
 - 如何在二级网址(如`http://localhost/subpath`)下运行。请访问[FastAPI运行说明](docs/WithFastapi.md)
 
+<br><br>
 
 # Advanced Usage
 ### I:自定义新的便捷按钮(学术快捷键)
-任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序。(如按钮已存在,那么前缀、后缀都支持热修改,无需重启程序即可生效。)
+任意文本编辑器打开`core_functional.py`,添加如下条目,然后重启程序。(如果按钮已存在,那么可以直接修改(前缀、后缀都已支持热修改),无需重启程序即可生效。)
 例如
-```
+```python
 "超级英译中": {
     # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
     "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",
@@ -207,6 +265,7 @@ docker-compose up
     "Suffix": "",
 },
 ```
 
 <div align="center">
 <img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
 </div>
@@ -216,6 +275,7 @@ docker-compose up
 本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。
 详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。
 
+<br><br>
 
 # Updates
 ### I:动态
@@ -264,9 +324,9 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
 </div>
 
-8. OpenAI音频解析与总结
+8. 基于mermaid的流图、脑图绘制
 <div align="center">
-<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
+<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/c518b82f-bd53-46e2-baf5-ad1b081c1da4" width="500" >
 </div>
 
 9. Latex全文校对纠错
@@ -283,7 +343,8 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 
 
 ### II:版本:
-- version 3.70(todo): 优化AutoGen插件主题并设计一系列衍生插件
+- version 3.80(TODO): 优化AutoGen插件主题并设计一系列衍生插件
+- version 3.70: 引入Mermaid绘图,实现GPT画脑图等功能
 - version 3.60: 引入AutoGen作为新一代插件的基石
 - version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG
 - version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面
@@ -303,7 +364,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 - version 3.0: 对chatglm和其他小型llm的支持
 - version 2.6: 重构了插件结构,提高了交互性,加入更多插件
 - version 2.5: 自更新,解决总结大工程源代码时文本过长、token溢出的问题
-- version 2.4: (1)新增PDF全文翻译功能; (2)新增输入区切换位置的功能; (3)新增垂直布局选项; (4)多线程函数插件优化。
+- version 2.4: 新增PDF全文翻译功能; 新增输入区切换位置的功能
 - version 2.3: 增强多线程交互性
 - version 2.2: 函数插件支持热重载
 - version 2.1: 可折叠式布局
@@ -314,7 +375,33 @@ GPT Academic开发者QQ群:`610599535`
 
 - 已知问题
     - 某些浏览器翻译插件干扰此软件前端的运行
-    - 官方Gradio目前有很多兼容性Bug,请务必使用`requirement.txt`安装Gradio
+    - 官方Gradio目前有很多兼容性问题,请**务必使用`requirement.txt`安装Gradio**
 
+```mermaid
+timeline LR
+    title GPT-Academic项目发展历程
+    section 2.x
+        1.0~2.2: 基础功能: 引入模块化函数插件: 可折叠式布局: 函数插件支持热重载
+        2.3~2.5: 增强多线程交互性: 新增PDF全文翻译功能: 新增输入区切换位置的功能: 自更新
+        2.6: 重构了插件结构: 提高了交互性: 加入更多插件
+    section 3.x
+        3.0~3.1: 对chatglm支持: 对其他小型llm支持: 支持同时问询多个gpt模型: 支持多个apikey负载均衡
+        3.2~3.3: 函数插件支持更多参数接口: 保存对话功能: 解读任意语言代码: 同时询问任意的LLM组合: 互联网信息综合功能
+        3.4: 加入arxiv论文翻译: 加入latex论文批改功能
+        3.44: 正式支持Azure: 优化界面易用性
+        3.46: 自定义ChatGLM2微调模型: 实时语音对话
+        3.49: 支持阿里达摩院通义千问: 上海AI-Lab书生: 讯飞星火: 支持百度千帆平台 & 文心一言
+        3.50: 虚空终端: 支持插件分类: 改进UI: 设计新主题
+        3.53: 动态选择不同界面主题: 提高稳定性: 解决多用户冲突问题
+        3.55: 动态代码解释器: 重构前端界面: 引入悬浮窗口与菜单栏
+        3.56: 动态追加基础功能按钮: 新汇报PDF汇总页面
+        3.57: GLM3, 星火v3: 支持文心一言v4: 修复本地模型的并发BUG
+        3.60: 引入AutoGen
+        3.70: 引入Mermaid绘图: 实现GPT画脑图等功能
+        3.80(TODO): 优化AutoGen插件主题: 设计衍生插件
+
+```
 
 
 ### III:主题
 可以通过修改`THEME`选项(config.py)变更主题
@@ -325,7 +412,8 @@ GPT Academic开发者QQ群:`610599535`
 
 1. `master` 分支: 主分支,稳定版
 2. `frontier` 分支: 开发分支,测试版
+3. 如何[接入其他大模型](request_llms/README.md)
+4. 访问GPT-Academic的[在线服务并支持我们](https://github.com/binary-husky/gpt_academic/wiki/online)
 
 ### V:参考与学习
 
check_proxy.py

@@ -5,7 +5,6 @@ def check_proxy(proxies):
     try:
         response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
         data = response.json()
-        # print(f'查询代理的地理位置,返回的结果是{data}')
         if 'country_name' in data:
             country = data['country_name']
             result = f"代理配置 {proxies_https}, 代理所在地:{country}"
@@ -47,8 +46,8 @@ def backup_and_download(current_version, remote_version):
     os.makedirs(new_version_dir)
     shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
     proxies = get_conf('proxies')
-    r = requests.get(
-        'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
+    try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
+    except: r = requests.get('https://public.gpt-academic.top/publish/master.zip', proxies=proxies, stream=True)
     zip_file_path = backup_dir+'/master.zip'
     with open(zip_file_path, 'wb+') as f:
         f.write(r.content)
@@ -111,11 +110,10 @@ def auto_update(raise_error=False):
     try:
         from toolbox import get_conf
         import requests
-        import time
        import json
         proxies = get_conf('proxies')
-        response = requests.get(
-            "https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
+        try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
+        except: response = requests.get("https://public.gpt-academic.top/publish/version", proxies=proxies, timeout=5)
         remote_json_data = json.loads(response.text)
         remote_version = remote_json_data['version']
         if remote_json_data["show_feature"]:
@@ -127,8 +125,7 @@ def auto_update(raise_error=False):
         current_version = json.loads(current_version)['version']
         if (remote_version - current_version) >= 0.01-1e-5:
             from colorful import print亮黄
-            print亮黄(
-                f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
+            print亮黄(f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
             print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
             user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?')
             if user_instruction in ['Y', 'y']:
@@ -154,7 +151,7 @@ def auto_update(raise_error=False):
             print(msg)
 
 def warm_up_modules():
-    print('正在执行一些模块的预热...')
+    print('正在执行一些模块的预热 ...')
     from toolbox import ProxyNetworkActivate
     from request_llms.bridge_all import model_info
     with ProxyNetworkActivate("Warmup_Modules"):
@@ -163,6 +160,14 @@ def warm_up_modules():
         enc = model_info["gpt-4"]['tokenizer']
         enc.encode("模块预热", disallowed_special=())
 
+def warm_up_vectordb():
+    print('正在执行一些模块的预热 ...')
+    from toolbox import ProxyNetworkActivate
+    with ProxyNetworkActivate("Warmup_Modules"):
+        import nltk
+        with ProxyNetworkActivate("Warmup_Modules"): nltk.download("punkt")
+
 
 if __name__ == '__main__':
     import os
     os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
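The `backup_and_download` and `auto_update` hunks above replace single `requests.get` calls with a try/except that falls back to the project's mirror when GitHub is unreachable. Below is a minimal, self-contained sketch of that fallback pattern, reusing the two URLs from the diff; the `timeout` and `raise_for_status()` check are illustrative additions, not part of the original code.

```python
import requests

def fetch_master_zip(proxies=None):
    # Primary source and mirror, exactly as they appear in the backup_and_download hunk.
    primary = 'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip'
    mirror = 'https://public.gpt-academic.top/publish/master.zip'
    try:
        r = requests.get(primary, proxies=proxies, stream=True, timeout=30)  # timeout added for illustration
        r.raise_for_status()  # added: also treat HTTP error codes as a reason to fall back
    except Exception:
        r = requests.get(mirror, proxies=proxies, stream=True, timeout=30)
    return r
```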
config.py: 87 changes

@@ -15,13 +15,13 @@ API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗
 USE_PROXY = False
 if USE_PROXY:
     """
+    代理网络的地址,打开你的代理软件查看代理协议(socks5h / http)、地址(localhost)和端口(11284)
     填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改
     <配置教程&视频教程> https://github.com/binary-husky/gpt_academic/issues/1>
     [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http
-    [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)
+    [地址] 填localhost或者127.0.0.1(localhost意思是代理软件安装在本机上)
     [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
     """
-    # 代理网络的地址,打开你的*学*网软件查看代理的协议(socks5h / http)、地址(localhost)和端口(11284)
     proxies = {
         # [协议]:// [地址] :[端口]
         "http": "socks5h://localhost:11284", # 再例如 "http": "http://127.0.0.1:7890",
@@ -86,20 +86,33 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
 
 
 # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
-LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
+LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview",
+AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
-                    "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
+                    "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
-                    "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-                    "chatglm3", "moss", "newbing", "claude-2"]
+                    "gemini-pro", "chatglm3", "claude-2", "zhipuai"]
-# P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
+# P.S. 其他可用的模型还包括 [
-# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
+# "moss", "qwen-turbo", "qwen-plus", "qwen-max"
+# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
+# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
+# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
+# ]
 
 
 # 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
 MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
 
 
+# 选择本地模型变体(只有当AVAIL_LLM_MODELS包含了对应本地模型时,才会起作用)
+# 如果你选择Qwen系列的模型,那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型
+# 也可以是具体的模型路径
+QWEN_LOCAL_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"
+
+
+# 接入通义千问在线大模型 https://dashscope.console.aliyun.com/
+DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY
 
 
 # 百度千帆(LLM_MODEL="qianfan")
 BAIDU_CLOUD_API_KEY = ''
 BAIDU_CLOUD_SECRET_KEY = ''
@@ -114,7 +127,6 @@ CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b
 LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
 LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本
 
-
 # 设置gradio的并行线程数(不需要修改)
 CONCURRENT_COUNT = 100
 
@@ -183,7 +195,13 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
 
 # 接入智谱大模型
 ZHIPUAI_API_KEY = ""
-ZHIPUAI_MODEL = "chatglm_turbo"
+ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"
 
 
+# # 火山引擎YUNQUE大模型
+# YUNQUE_SECRET_KEY = ""
+# YUNQUE_ACCESS_KEY = ""
+# YUNQUE_MODEL = ""
 
 
 # Claude API KEY
@@ -194,6 +212,10 @@ ANTHROPIC_API_KEY = ""
 CUSTOM_API_KEY_PATTERN = ""
 
 
+# Google Gemini API-Key
+GEMINI_API_KEY = ''
 
 
 # HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
 HUGGINGFACE_ACCESS_TOKEN = "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"
 
@@ -232,14 +254,13 @@ WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
 BLOCK_INVALID_APIKEY = False
 
 
+# 启用插件热加载
+PLUGIN_HOT_RELOAD = False
 
 
 # 自定义按钮的最大数量限制
 NUM_CUSTOM_BASIC_BTN = 4
 
 
-# LATEX实验性功能
-LATEX_EXPERIMENTAL = False
 
 
 """
 在线大模型配置关联关系示意图
 │
@@ -276,11 +297,37 @@ LATEX_EXPERIMENTAL = False
 │   ├── BAIDU_CLOUD_API_KEY
 │   └── BAIDU_CLOUD_SECRET_KEY
 │
-├── "newbing" Newbing接口不再稳定,不推荐使用
+├── "zhipuai" 智谱AI大模型chatglm_turbo
+│   ├── ZHIPUAI_API_KEY
+│   └── ZHIPUAI_MODEL
+│
+├── "qwen-turbo" 等通义千问大模型
+│   └── DASHSCOPE_API_KEY
+│
+├── "Gemini"
+│   └── GEMINI_API_KEY
+│
+└── "newbing" Newbing接口不再稳定,不推荐使用
     ├── NEWBING_STYLE
     └── NEWBING_COOKIES
 
 
+本地大模型示意图
+│
+├── "chatglm3"
+├── "chatglm"
+├── "chatglm_onnx"
+├── "chatglmft"
+├── "internlm"
+├── "moss"
+├── "jittorllms_pangualpha"
+├── "jittorllms_llama"
+├── "deepseekcoder"
+├── "qwen-local"
+├── RWKV的支持见Wiki
+└── "llama2"
 
 
 用户图形界面布局依赖关系示意图
 │
 ├── CHATBOT_HEIGHT 对话窗的高度
@@ -291,7 +338,7 @@ LATEX_EXPERIMENTAL = False
 ├── THEME 色彩主题
 ├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框
 ├── ADD_WAIFU 加一个live2d装饰
-├── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性
+└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性
 
 
 插件在线服务配置依赖关系示意图
@@ -303,7 +350,7 @@ LATEX_EXPERIMENTAL = False
 │   ├── ALIYUN_ACCESSKEY
 │   └── ALIYUN_SECRET
 │
-├── PDF文档精准解析
+└── PDF文档精准解析
-│   └── GROBID_URLS
+    └── GROBID_URLS
 """
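Both the README and config.py above document the configuration lookup order `环境变量` > `config_private.py` > `config.py`. The sketch below is a hypothetical illustration of that order only; the project's real `get_conf` lives in `toolbox.py` and its implementation details (caching, type handling) differ, and `get_conf_sketch` is not project API.

```python
import importlib
import os

def get_conf_sketch(name):
    # 1. An environment variable with the same name wins outright.
    if name in os.environ:
        return os.environ[name]
    # 2. Otherwise an entry in the untracked config_private.py overrides config.py,
    #    so user settings survive automatic updates of the repository.
    try:
        private = importlib.import_module('config_private')
        if hasattr(private, name):
            return getattr(private, name)
    except ModuleNotFoundError:
        pass
    # 3. Finally, fall back to the tracked default in config.py.
    return getattr(importlib.import_module('config'), name)
```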
core_functional.py

@@ -3,30 +3,69 @@
 # 'stop' 颜色对应 theme.py 中的 color_er
 import importlib
 from toolbox import clear_line_break
+from toolbox import apply_gpt_academic_string_mask_langbased
+from toolbox import build_gpt_academic_masked_string_langbased
+from textwrap import dedent
 
 def get_core_functions():
     return {
-        "英语学术润色": {
-            # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
-            "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
-                      r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
-                      r"Firstly, you should provide the polished paragraph. "
-                      r"Secondly, you should list all your modification and explain the reasons to do so in markdown table." + "\n\n",
-            # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
+        "学术语料润色": {
+            # [1*] 前缀字符串,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等。
+            # 这里填一个提示词字符串就行了,这里为了区分中英文情景搞复杂了一点
+            "Prefix": build_gpt_academic_masked_string_langbased(
+                text_show_english=
+                r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, "
+                r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. "
+                r"Firstly, you should provide the polished paragraph. "
+                r"Secondly, you should list all your modification and explain the reasons to do so in markdown table.",
+                text_show_chinese=
+                r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,"
+                r"同时分解长句,减少重复,并提供改进建议。请先提供文本的更正版本,然后在markdown表格中列出修改的内容,并给出修改的理由:"
+            ) + "\n\n",
+            # [2*] 后缀字符串,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
             "Suffix": r"",
-            # 按钮颜色 (默认 secondary)
+            # [3] 按钮颜色 (可选参数,默认 secondary)
             "Color": r"secondary",
-            # 按钮是否可见 (默认 True,即可见)
+            # [4] 按钮是否可见 (可选参数,默认 True,即可见)
             "Visible": True,
-            # 是否在触发时清除历史 (默认 False,即不处理之前的对话历史)
-            "AutoClearHistory": False
+            # [5] 是否在触发时清除历史 (可选参数,默认 False,即不处理之前的对话历史)
+            "AutoClearHistory": False,
+            # [6] 文本预处理 (可选参数,默认 None,举例:写个函数移除所有的换行符)
+            "PreProcess": None,
         },
-        "中文学术润色": {
-            "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
-                      r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
-            "Suffix": r"",
+        "总结绘制脑图": {
+            # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
+            "Prefix": r"",
+            # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
+            "Suffix":
+                # dedent() 函数用于去除多行字符串的缩进
+                dedent("\n"+r'''
+                ==============================
+
+                使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:
+
+                以下是对以上文本的总结,以mermaid flowchart的形式展示:
+                ```mermaid
+                flowchart LR
+                    A["节点名1"] --> B("节点名2")
+                    B --> C{"节点名3"}
+                    C --> D["节点名4"]
+                    C --> |"箭头名1"| E["节点名5"]
+                    C --> |"箭头名2"| F["节点名6"]
+                ```
+
+                警告:
+                (1)使用中文
+                (2)节点名字使用引号包裹,如["Laptop"]
+                (3)`|` 和 `"`之间不要存在空格
+                (4)根据情况选择flowchart LR(从左到右)或者flowchart TD(从上到下)
+                '''),
         },
 
 
         "查找语法错误": {
             "Prefix": r"Help me ensure that the grammar and the spelling is correct. "
                       r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. "
@@ -46,42 +85,61 @@ def get_core_functions():
             "Suffix": r"",
             "PreProcess": clear_line_break, # 预处理:清除换行符
         },
 
 
         "中译英": {
             "Prefix": r"Please translate following sentence to English:" + "\n\n",
             "Suffix": r"",
         },
-        "学术中英互译": {
-            "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
-                      r"I will provide you with some paragraphs in one language " +
-                      r"and your task is to accurately and academically translate the paragraphs only into the other language. " +
-                      r"Do not repeat the original provided paragraphs after translation. " +
-                      r"You should use artificial intelligence tools, " +
-                      r"such as natural language processing, and rhetorical knowledge " +
-                      r"and experience about effective writing techniques to reply. " +
-                      r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:" + "\n\n",
-            "Suffix": "",
-            "Color": "secondary",
+        "学术英中互译": {
+            "Prefix": build_gpt_academic_masked_string_langbased(
+                text_show_chinese=
+                r"I want you to act as a scientific English-Chinese translator, "
+                r"I will provide you with some paragraphs in one language "
+                r"and your task is to accurately and academically translate the paragraphs only into the other language. "
+                r"Do not repeat the original provided paragraphs after translation. "
+                r"You should use artificial intelligence tools, "
+                r"such as natural language processing, and rhetorical knowledge "
+                r"and experience about effective writing techniques to reply. "
+                r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:",
+                text_show_english=
+                r"你是经验丰富的翻译,请把以下学术文章段落翻译成中文,"
+                r"并同时充分考虑中文的语法、清晰、简洁和整体可读性,"
+                r"必要时,你可以修改整个句子的顺序以确保翻译后的段落符合中文的语言习惯。"
+                r"你需要翻译的文本如下:"
+            ) + "\n\n",
+            "Suffix": r"",
         },
 
 
         "英译中": {
             "Prefix": r"翻译成地道的中文:" + "\n\n",
             "Suffix": r"",
             "Visible": False,
         },
 
 
         "找图片": {
-            "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
+            "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL,"
                       r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
             "Suffix": r"",
             "Visible": False,
         },
 
 
         "解释代码": {
             "Prefix": r"请解释以下代码:" + "\n```\n",
             "Suffix": "\n```\n",
         },
 
 
         "参考文献转Bib": {
-            "Prefix": r"Here are some bibliography items, please transform them into bibtex style." +
-                      r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
-                      r"Items need to be transformed:",
+            "Prefix": r"Here are some bibliography items, please transform them into bibtex style."
+                      r"Note that, reference styles maybe more than one kind, you should transform each item correctly."
+                      r"Items need to be transformed:" + "\n\n",
             "Visible": False,
             "Suffix": r"",
         }
     }
@@ -98,8 +156,18 @@ def handle_core_functionality(additional_fn, inputs, history, chatbot):
         return inputs, history
     else:
         # 预制功能
-        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
-        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
+        if "PreProcess" in core_functional[additional_fn]:
+            if core_functional[additional_fn]["PreProcess"] is not None:
+                inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
+        # 为字符串加上上面定义的前缀和后缀。
+        inputs = apply_gpt_academic_string_mask_langbased(
+            string = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"],
+            lang_reference = inputs,
+        )
         if core_functional[additional_fn].get("AutoClearHistory", False):
             history = []
         return inputs, history
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
t = get_core_functions()["总结绘制脑图"]
|
||||||
|
print(t["Prefix"] + t["Suffix"])
|
||||||
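The hunks above change how a core-function entry is consumed: PreProcess may now be explicitly None, and the Prefix/Suffix concatenation is routed through a language-based string mask. A minimal sketch of the Prefix/Suffix/PreProcess contract follows; this is hypothetical standalone code for illustration, not the repository implementation, and the names `entry` and `assemble` are made up here:

```python
# Minimal sketch of the core-function contract (illustrative only).
# `assemble` mirrors the order used by handle_core_functionality above:
# optional PreProcess first, then Prefix + input + Suffix.
def clear_line_break(txt: str) -> str:
    # same idea as the PreProcess of "查找语法错误": collapse line breaks
    return " ".join(txt.split())

entry = {
    "Prefix": "Please translate following sentence to English:\n\n",
    "Suffix": "",
    "PreProcess": clear_line_break,
}

def assemble(entry: dict, user_input: str) -> str:
    if entry.get("PreProcess") is not None:
        user_input = entry["PreProcess"](user_input)
    return entry["Prefix"] + user_input + entry["Suffix"]

print(assemble(entry, "第一行\n第二行"))
# -> Please translate following sentence to English:
#
#    第一行 第二行
```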
@@ -32,115 +32,122 @@ def get_crazy_functions():
     from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
     from crazy_functions.Latex全文润色 import Latex中文润色
     from crazy_functions.Latex全文润色 import Latex英文纠错
-    from crazy_functions.Latex全文翻译 import Latex中译英
-    from crazy_functions.Latex全文翻译 import Latex英译中
     from crazy_functions.批量Markdown翻译 import Markdown中译英
     from crazy_functions.虚空终端 import 虚空终端
+    from crazy_functions.生成多种Mermaid图表 import 生成多种Mermaid图表

     function_plugins = {
         "虚空终端": {
             "Group": "对话|编程|学术|智能体",
             "Color": "stop",
             "AsButton": True,
-            "Function": HotReload(虚空终端)
+            "Function": HotReload(虚空终端),
         },
         "解析整个Python项目": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": True,
             "Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径",
-            "Function": HotReload(解析一个Python项目)
+            "Function": HotReload(解析一个Python项目),
         },
         "载入对话历史存档(先上传存档或输入路径)": {
             "Group": "对话",
             "Color": "stop",
             "AsButton": False,
             "Info": "载入对话历史存档 | 输入参数为路径",
-            "Function": HotReload(载入对话历史存档)
+            "Function": HotReload(载入对话历史存档),
         },
         "删除所有本地对话历史记录(谨慎操作)": {
             "Group": "对话",
             "AsButton": False,
             "Info": "删除所有本地对话历史记录,谨慎操作 | 不需要输入参数",
-            "Function": HotReload(删除所有本地对话历史记录)
+            "Function": HotReload(删除所有本地对话历史记录),
         },
         "清除所有缓存文件(谨慎操作)": {
             "Group": "对话",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
-            "Function": HotReload(清除缓存)
+            "Function": HotReload(清除缓存),
+        },
+        "生成多种Mermaid图表(从当前对话或文件(.pdf/.md)中生产图表)": {
+            "Group": "对话",
+            "Color": "stop",
+            "AsButton": False,
+            "Info" : "基于当前对话或PDF生成多种Mermaid图表,图表类型由模型判断",
+            "Function": HotReload(生成多种Mermaid图表),
+            "AdvancedArgs": True,
+            "ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图",
         },
         "批量总结Word文档": {
             "Group": "学术",
             "Color": "stop",
             "AsButton": True,
             "Info": "批量总结word文档 | 输入参数为路径",
-            "Function": HotReload(总结word文档)
+            "Function": HotReload(总结word文档),
         },
         "解析整个Matlab项目": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,
             "Info": "解析一个Matlab项目的所有源文件(.m) | 输入参数为路径",
-            "Function": HotReload(解析一个Matlab项目)
+            "Function": HotReload(解析一个Matlab项目),
         },
         "解析整个C++项目头文件": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径",
-            "Function": HotReload(解析一个C项目的头文件)
+            "Function": HotReload(解析一个C项目的头文件),
         },
         "解析整个C++项目(.cpp/.hpp/.c/.h)": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h)| 输入参数为路径",
-            "Function": HotReload(解析一个C项目)
+            "Function": HotReload(解析一个C项目),
         },
         "解析整个Go项目": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "解析一个Go项目的所有源文件 | 输入参数为路径",
-            "Function": HotReload(解析一个Golang项目)
+            "Function": HotReload(解析一个Golang项目),
         },
         "解析整个Rust项目": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "解析一个Rust项目的所有源文件 | 输入参数为路径",
-            "Function": HotReload(解析一个Rust项目)
+            "Function": HotReload(解析一个Rust项目),
         },
         "解析整个Java项目": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "解析一个Java项目的所有源文件 | 输入参数为路径",
-            "Function": HotReload(解析一个Java项目)
+            "Function": HotReload(解析一个Java项目),
         },
         "解析整个前端项目(js,ts,css等)": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "解析一个前端项目的所有源文件(js,ts,css等) | 输入参数为路径",
-            "Function": HotReload(解析一个前端项目)
+            "Function": HotReload(解析一个前端项目),
         },
         "解析整个Lua项目": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "解析一个Lua项目的所有源文件 | 输入参数为路径",
-            "Function": HotReload(解析一个Lua项目)
+            "Function": HotReload(解析一个Lua项目),
         },
         "解析整个CSharp项目": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径",
-            "Function": HotReload(解析一个CSharp项目)
+            "Function": HotReload(解析一个CSharp项目),
         },
         "解析Jupyter Notebook文件": {
             "Group": "编程",
@@ -156,103 +163,104 @@ def get_crazy_functions():
             "Color": "stop",
             "AsButton": False,
             "Info": "读取Tex论文并写摘要 | 输入参数为路径",
-            "Function": HotReload(读文章写摘要)
+            "Function": HotReload(读文章写摘要),
         },
         "翻译README或MD": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": True,
             "Info": "将Markdown翻译为中文 | 输入参数为路径或URL",
-            "Function": HotReload(Markdown英译中)
+            "Function": HotReload(Markdown英译中),
         },
         "翻译Markdown或README(支持Github链接)": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,
             "Info": "将Markdown或README翻译为中文 | 输入参数为路径或URL",
-            "Function": HotReload(Markdown英译中)
+            "Function": HotReload(Markdown英译中),
         },
         "批量生成函数注释": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "批量生成函数的注释 | 输入参数为路径",
-            "Function": HotReload(批量生成函数注释)
+            "Function": HotReload(批量生成函数注释),
         },
         "保存当前的对话": {
             "Group": "对话",
             "AsButton": True,
             "Info": "保存当前的对话 | 不需要输入参数",
-            "Function": HotReload(对话历史存档)
+            "Function": HotReload(对话历史存档),
         },
         "[多线程Demo]解析此项目本身(源码自译解)": {
             "Group": "对话|编程",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
-            "Function": HotReload(解析项目本身)
+            "Function": HotReload(解析项目本身),
         },
         "历史上的今天": {
             "Group": "对话",
             "AsButton": True,
             "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
-            "Function": HotReload(高阶功能模板函数)
+            "Function": HotReload(高阶功能模板函数),
         },
         "精准翻译PDF论文": {
             "Group": "学术",
             "Color": "stop",
             "AsButton": True,
             "Info": "精准翻译PDF论文为中文 | 输入参数为路径",
-            "Function": HotReload(批量翻译PDF文档)
+            "Function": HotReload(批量翻译PDF文档),
         },
         "询问多个GPT模型": {
             "Group": "对话",
             "Color": "stop",
             "AsButton": True,
-            "Function": HotReload(同时问询)
+            "Function": HotReload(同时问询),
         },
         "批量总结PDF文档": {
             "Group": "学术",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "批量总结PDF文档的内容 | 输入参数为路径",
-            "Function": HotReload(批量总结PDF文档)
+            "Function": HotReload(批量总结PDF文档),
         },
         "谷歌学术检索助手(输入谷歌学术搜索页url)": {
             "Group": "学术",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL",
-            "Function": HotReload(谷歌检索小助手)
+            "Function": HotReload(谷歌检索小助手),
         },
         "理解PDF文档内容 (模仿ChatPDF)": {
             "Group": "学术",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "理解PDF文档的内容并进行回答 | 输入参数为路径",
-            "Function": HotReload(理解PDF文档内容标准文件输入)
+            "Function": HotReload(理解PDF文档内容标准文件输入),
         },
         "英文Latex项目全文润色(输入路径或上传压缩包)": {
             "Group": "学术",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
-            "Function": HotReload(Latex英文润色)
-        },
-        "英文Latex项目全文纠错(输入路径或上传压缩包)": {
-            "Group": "学术",
-            "Color": "stop",
-            "AsButton": False,  # 加入下拉菜单中
-            "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
-            "Function": HotReload(Latex英文纠错)
+            "Function": HotReload(Latex英文润色),
         },
         "中文Latex项目全文润色(输入路径或上传压缩包)": {
             "Group": "学术",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
-            "Function": HotReload(Latex中文润色)
+            "Function": HotReload(Latex中文润色),
         },
+        # 已经被新插件取代
+        # "英文Latex项目全文纠错(输入路径或上传压缩包)": {
+        #     "Group": "学术",
+        #     "Color": "stop",
+        #     "AsButton": False,  # 加入下拉菜单中
+        #     "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
+        #     "Function": HotReload(Latex英文纠错),
+        # },
         # 已经被新插件取代
         # "Latex项目全文中译英(输入路径或上传压缩包)": {
         #     "Group": "学术",
@@ -261,7 +269,6 @@ def get_crazy_functions():
         #     "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
         #     "Function": HotReload(Latex中译英)
         #  },
-
         # 已经被新插件取代
         # "Latex项目全文英译中(输入路径或上传压缩包)": {
         #     "Group": "学术",
@@ -270,315 +277,405 @@ def get_crazy_functions():
         #     "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包",
         #     "Function": HotReload(Latex英译中)
         #  },
         "批量Markdown中译英(输入路径或上传压缩包)": {
             "Group": "编程",
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包",
-            "Function": HotReload(Markdown中译英)
+            "Function": HotReload(Markdown中译英),
         },
     }

     # -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-
     try:
         from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
-        function_plugins.update({
-            "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
-                "Group": "学术",
-                "Color": "stop",
-                "AsButton": False,  # 加入下拉菜单中
-                # "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695",
-                "Function": HotReload(下载arxiv论文并翻译摘要)
+        function_plugins.update(
+            {
+                "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
+                    "Group": "学术",
+                    "Color": "stop",
+                    "AsButton": False,  # 加入下拉菜单中
+                    # "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695",
+                    "Function": HotReload(下载arxiv论文并翻译摘要),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.联网的ChatGPT import 连接网络回答问题
-        function_plugins.update({
-            "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,  # 加入下拉菜单中
-                # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
-                "Function": HotReload(连接网络回答问题)
+        function_plugins.update(
+            {
+                "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,  # 加入下拉菜单中
+                    # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
+                    "Function": HotReload(连接网络回答问题),
+                }
             }
-        })
+        )
         from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
-        function_plugins.update({
-            "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,  # 加入下拉菜单中
-                "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
-                "Function": HotReload(连接bing搜索回答问题)
+        function_plugins.update(
+            {
+                "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,  # 加入下拉菜单中
+                    "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
+                    "Function": HotReload(连接bing搜索回答问题),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.解析项目源代码 import 解析任意code项目
-        function_plugins.update({
-            "解析项目源代码(手动指定和筛选源代码文件类型)": {
-                "Group": "编程",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
-                "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"",  # 高级参数输入区的显示提示
-                "Function": HotReload(解析任意code项目)
-            },
-        })
+        function_plugins.update(
+            {
+                "解析项目源代码(手动指定和筛选源代码文件类型)": {
+                    "Group": "编程",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
+                    "ArgsReminder": '输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: "*.c, ^*.cpp, config.toml, ^*.toml"',  # 高级参数输入区的显示提示
+                    "Function": HotReload(解析任意code项目),
+                },
+            }
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
-        function_plugins.update({
-            "询问多个GPT模型(手动指定询问哪些模型)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
-                "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4",  # 高级参数输入区的显示提示
-                "Function": HotReload(同时问询_指定模型)
-            },
-        })
+        function_plugins.update(
+            {
+                "询问多个GPT模型(手动指定询问哪些模型)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
+                    "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4",  # 高级参数输入区的显示提示
+                    "Function": HotReload(同时问询_指定模型),
+                },
+            }
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
-        from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3
-        function_plugins.update({
-            "图片生成_DALLE2 (先切换模型到openai或api2d)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
-                "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024",  # 高级参数输入区的显示提示
-                "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
-                "Function": HotReload(图片生成_DALLE2)
-            },
-        })
-        function_plugins.update({
-            "图片生成_DALLE3 (先切换模型到openai或api2d)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
-                "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792。如需生成高清图像,请输入 1024x1024-HD, 1792x1024-HD, 1024x1792-HD。",  # 高级参数输入区的显示提示
-                "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
-                "Function": HotReload(图片生成_DALLE3)
-            },
-        })
+        from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
+        function_plugins.update(
+            {
+                "图片生成_DALLE2 (先切换模型到gpt-*)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
+                    "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024",  # 高级参数输入区的显示提示
+                    "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
+                    "Function": HotReload(图片生成_DALLE2),
+                },
+            }
+        )
+        function_plugins.update(
+            {
+                "图片生成_DALLE3 (先切换模型到gpt-*)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
+                    "ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」",  # 高级参数输入区的显示提示
+                    "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
+                    "Function": HotReload(图片生成_DALLE3),
+                },
+            }
+        )
+        function_plugins.update(
+            {
+                "图片修改_DALLE2 (先切换模型到gpt-*)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": False,  # 调用时,唤起高级参数输入区(默认False)
+                    # "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
+                    "Function": HotReload(图片修改_DALLE2),
+                },
+            }
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.总结音视频 import 总结音视频
-        function_plugins.update({
-            "批量总结音视频(输入路径或上传压缩包)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,
-                "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
-                "Info": "批量总结音频或视频 | 输入参数为路径",
-                "Function": HotReload(总结音视频)
+        function_plugins.update(
+            {
+                "批量总结音视频(输入路径或上传压缩包)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,
+                    "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
+                    "Info": "批量总结音频或视频 | 输入参数为路径",
+                    "Function": HotReload(总结音视频),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.数学动画生成manim import 动画生成
-        function_plugins.update({
-            "数学动画生成(Manim)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,
-                "Info": "按照自然语言描述生成一个动画 | 输入参数是一段话",
-                "Function": HotReload(动画生成)
+        function_plugins.update(
+            {
+                "数学动画生成(Manim)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "Info": "按照自然语言描述生成一个动画 | 输入参数是一段话",
+                    "Function": HotReload(动画生成),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
-        function_plugins.update({
-            "Markdown翻译(指定翻译成何种语言)": {
-                "Group": "编程",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,
-                "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。",
-                "Function": HotReload(Markdown翻译指定语言)
+        function_plugins.update(
+            {
+                "Markdown翻译(指定翻译成何种语言)": {
+                    "Group": "编程",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,
+                    "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。",
+                    "Function": HotReload(Markdown翻译指定语言),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
-        from crazy_functions.Langchain知识库 import 知识库问答
-        function_plugins.update({
-            "构建知识库(先上传文件素材,再运行此插件)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,
-                "ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式,向知识库追加更多文档。",
-                "Function": HotReload(知识库问答)
+        from crazy_functions.知识库问答 import 知识库文件注入
+        function_plugins.update(
+            {
+                "构建知识库(先上传文件素材,再运行此插件)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,
+                    "ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式,向知识库追加更多文档。",
+                    "Function": HotReload(知识库文件注入),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
-        from crazy_functions.Langchain知识库 import 读取知识库作答
-        function_plugins.update({
-            "知识库问答(构建知识库后,再运行此插件)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,
-                "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。",
-                "Function": HotReload(读取知识库作答)
+        from crazy_functions.知识库问答 import 读取知识库作答
+        function_plugins.update(
+            {
+                "知识库文件注入(构建知识库后,再运行此插件)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,
+                    "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。",
+                    "Function": HotReload(读取知识库作答),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.交互功能函数模板 import 交互功能模板函数
-        function_plugins.update({
-            "交互功能模板Demo函数(查找wallhaven.cc的壁纸)": {
-                "Group": "对话",
-                "Color": "stop",
-                "AsButton": False,
-                "Function": HotReload(交互功能模板函数)
+        function_plugins.update(
+            {
+                "交互功能模板Demo函数(查找wallhaven.cc的壁纸)": {
+                    "Group": "对话",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "Function": HotReload(交互功能模板函数),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
-        function_plugins.update({
-            "Latex英文纠错+高亮修正位置 [需Latex]": {
-                "Group": "学术",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,
-                "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
-                "Function": HotReload(Latex英文纠错加PDF对比)
-            }
-        })
         from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
-        function_plugins.update({
-            "Arixv论文精细翻译(输入arxivID)[需Latex]": {
-                "Group": "学术",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,
-                "ArgsReminder":
-                    "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
-                    "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
-                    'If the term "agent" is used in this section, it should be translated to "智能体". ',
-                "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
-                "Function": HotReload(Latex翻译中文并重新编译PDF)
-            }
-        })
-        function_plugins.update({
-            "本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
-                "Group": "学术",
-                "Color": "stop",
-                "AsButton": False,
-                "AdvancedArgs": True,
-                "ArgsReminder":
-                    "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
-                    "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
-                    'If the term "agent" is used in this section, it should be translated to "智能体". ',
-                "Info": "本地Latex论文精细翻译 | 输入参数是路径",
-                "Function": HotReload(Latex翻译中文并重新编译PDF)
-            }
-        })
+        function_plugins.update(
+            {
+                "Latex英文纠错+高亮修正位置 [需Latex]": {
+                    "Group": "学术",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,
+                    "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
+                    "Function": HotReload(Latex英文纠错加PDF对比),
+                },
+                "Arxiv论文精细翻译(输入arxivID)[需Latex]": {
+                    "Group": "学术",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,
+                    "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
+                    + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
+                    + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
+                    "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
+                    "Function": HotReload(Latex翻译中文并重新编译PDF),
+                },
+                "本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
+                    "Group": "学术",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "AdvancedArgs": True,
+                    "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
+                    + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
+                    + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
+                    "Info": "本地Latex论文精细翻译 | 输入参数是路径",
+                    "Function": HotReload(Latex翻译中文并重新编译PDF),
+                }
+            }
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from toolbox import get_conf
-        ENABLE_AUDIO = get_conf('ENABLE_AUDIO')
+
+        ENABLE_AUDIO = get_conf("ENABLE_AUDIO")
         if ENABLE_AUDIO:
             from crazy_functions.语音助手 import 语音助手
-            function_plugins.update({
-                "实时语音对话": {
-                    "Group": "对话",
-                    "Color": "stop",
-                    "AsButton": True,
-                    "Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
-                    "Function": HotReload(语音助手)
+            function_plugins.update(
+                {
+                    "实时语音对话": {
+                        "Group": "对话",
+                        "Color": "stop",
+                        "AsButton": True,
+                        "Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
+                        "Function": HotReload(语音助手),
+                    }
                 }
-            })
+            )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.批量翻译PDF文档_NOUGAT import 批量翻译PDF文档
-        function_plugins.update({
-            "精准翻译PDF文档(NOUGAT)": {
-                "Group": "学术",
-                "Color": "stop",
-                "AsButton": False,
-                "Function": HotReload(批量翻译PDF文档)
+        function_plugins.update(
+            {
+                "精准翻译PDF文档(NOUGAT)": {
+                    "Group": "学术",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "Function": HotReload(批量翻译PDF文档),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.函数动态生成 import 函数动态生成
-        function_plugins.update({
-            "动态代码解释器(CodeInterpreter)": {
-                "Group": "智能体",
-                "Color": "stop",
-                "AsButton": False,
-                "Function": HotReload(函数动态生成)
+        function_plugins.update(
+            {
+                "动态代码解释器(CodeInterpreter)": {
+                    "Group": "智能体",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "Function": HotReload(函数动态生成),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

     try:
         from crazy_functions.多智能体 import 多智能体终端
-        function_plugins.update({
-            "AutoGen多智能体终端(仅供测试)": {
-                "Group": "智能体",
-                "Color": "stop",
-                "AsButton": False,
-                "Function": HotReload(多智能体终端)
+        function_plugins.update(
+            {
+                "AutoGen多智能体终端(仅供测试)": {
+                    "Group": "智能体",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "Function": HotReload(多智能体终端),
+                }
             }
-        })
+        )
     except:
         print(trimmed_format_exc())
-        print('Load function plugin failed')
+        print("Load function plugin failed")

+    try:
+        from crazy_functions.互动小游戏 import 随机小游戏
+
+        function_plugins.update(
+            {
+                "随机互动小游戏(仅供测试)": {
+                    "Group": "智能体",
+                    "Color": "stop",
+                    "AsButton": False,
+                    "Function": HotReload(随机小游戏),
+                }
+            }
+        )
+    except:
+        print(trimmed_format_exc())
+        print("Load function plugin failed")
+
+    # try:
+    #     from crazy_functions.高级功能函数模板 import 测试图表渲染
+    #     function_plugins.update({
+    #         "绘制逻辑关系(测试图表渲染)": {
+    #             "Group": "智能体",
+    #             "Color": "stop",
+    #             "AsButton": True,
+    #             "Function": HotReload(测试图表渲染)
+    #         }
+    #     })
+    # except:
+    #     print(trimmed_format_exc())
+    #     print('Load function plugin failed')
+
     # try:
     #     from crazy_functions.chatglm微调工具 import 微调数据集生成
@@ -594,8 +691,6 @@ def get_crazy_functions():
     # except:
     #     print('Load function plugin failed')

-
-
     """
     设置默认值:
    - 默认 Group = 对话
@@ -605,12 +700,12 @@ def get_crazy_functions():
     """
     for name, function_meta in function_plugins.items():
         if "Group" not in function_meta:
-            function_plugins[name]["Group"] = '对话'
+            function_plugins[name]["Group"] = "对话"
         if "AsButton" not in function_meta:
             function_plugins[name]["AsButton"] = True
         if "AdvancedArgs" not in function_meta:
             function_plugins[name]["AdvancedArgs"] = False
         if "Color" not in function_meta:
-            function_plugins[name]["Color"] = 'secondary'
+            function_plugins[name]["Color"] = "secondary"

     return function_plugins
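Every plugin entry above follows the same metadata schema, and missing keys are back-filled by the defaults loop at the end of get_crazy_functions. A sketch of that normalization follows; `HotReload` is stubbed out here as an assumption (in the repository it wraps a plugin for hot reloading), and `my_plugin` is a hypothetical plugin:

```python
# Sketch of the plugin-metadata normalization shown in the defaults loop above.
def HotReload(fn):
    return fn  # stand-in for illustration only

def my_plugin(*args, **kwargs):
    pass

function_plugins = {
    "示例插件": {
        "Group": "学术",  # which dropdown group the plugin belongs to
        "Function": HotReload(my_plugin),
        # "AsButton", "AdvancedArgs" and "Color" are omitted on purpose
    }
}

# Equivalent back-filling logic: absent keys receive the documented defaults.
for name, meta in function_plugins.items():
    meta.setdefault("Group", "对话")
    meta.setdefault("AsButton", True)
    meta.setdefault("AdvancedArgs", False)
    meta.setdefault("Color", "secondary")

print(function_plugins["示例插件"]["Color"])  # -> secondary
```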
@@ -137,7 +137,7 @@ def get_recent_file_prompt_support(chatbot):
     return path

 @CatchException
-def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -145,7 +145,7 @@ def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     raise NotImplementedError
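This hunk is part of a repository-wide migration of the last plugin parameter from web_port to user_request; the rest of the signature stays the same. An illustrative skeleton of the resulting entrypoint shape, with the parameter descriptions paraphrased from the docstrings in these hunks:

```python
# Shape of a plugin entrypoint after the migration (illustrative skeleton only).
def some_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    # txt            text typed into the input box, or a path to files to process
    # llm_kwargs     model parameters such as temperature and top_p, usually passed through
    # plugin_kwargs  plugin-specific parameters (e.g. the advanced-args box)
    # chatbot        handle of the chat display, used to show output to the user
    # history        chat history, the prior context
    # system_prompt  silent system reminder for the model
    # user_request   info about the current user's request (IP address etc.);
    #                this replaces the old web_port argument
    ...
```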
@@ -26,8 +26,8 @@ class PaperFileGroup():
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index])
             else:
-                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
+                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                 for j, segment in enumerate(segments):
                     self.sp_file_contents.append(segment)
                     self.sp_file_index.append(index)
@@ -135,11 +135,11 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch


 @CatchException
-def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
-        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky。(注意,此插件不调用Latex,如果有Latex环境,请使用“Latex英文纠错+高亮”插件)"])
+        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky。(注意,此插件不调用Latex,如果有Latex环境,请使用「Latex英文纠错+高亮修正位置(需Latex)插件」"])
     yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面

     # 尝试导入依赖,如果缺少依赖,则给出安装建议
@@ -173,7 +173,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p


 @CatchException
-def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
@@ -209,7 +209,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p


 @CatchException
-def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
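Both Latex plugin files now import breakdown_text_to_satisfy_token_limit from crazy_functions.pdf_fns.breakdown_txt instead of the old PDF-specific helper, and no longer pass a token-counting callback in. A rough sketch of what such a splitter does follows; the recursive-bisection strategy and the crude length-based token count are assumptions for illustration, not the repository's actual implementation:

```python
# Hypothetical sketch of a token-limited text splitter: bisect the text until
# every segment fits under the limit. The real breakdown_text_to_satisfy_token_limit
# may pick smarter split points (paragraphs, sentences) and use a real tokenizer.
def breakdown_text(text: str, max_tokens: int, count=lambda s: len(s) // 4) -> list:
    if count(text) <= max_tokens:
        return [text]
    mid = len(text) // 2
    cut = text.rfind("\n", 0, mid)  # prefer a newline near the middle, if any
    if cut <= 0:
        cut = mid
    return breakdown_text(text[:cut], max_tokens, count) + \
           breakdown_text(text[cut:], max_tokens, count)

segments = breakdown_text("line\n" * 4000, max_tokens=1024)
assert all(len(s) // 4 <= 1024 for s in segments)
```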
@@ -26,8 +26,8 @@ class PaperFileGroup():
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index])
             else:
-                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
+                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                 for j, segment in enumerate(segments):
                     self.sp_file_contents.append(segment)
                     self.sp_file_index.append(index)
@@ -106,7 +106,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch


 @CatchException
-def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
@@ -143,7 +143,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom


 @CatchException
-def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
@@ -1,11 +1,11 @@
 from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
 from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
 from functools import partial
-import glob, os, requests, time
+import glob, os, requests, time, tarfile
 pj = os.path.join
 ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")

-# =================================== 工具函数 ===============================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 # 专业词汇声明  = 'If the term "agent" is used in this section, it should be translated to "智能体". '
 def switch_prompt(pfg, mode, more_requirement):
     """
@@ -73,6 +73,7 @@ def move_project(project_folder, arxiv_id=None):

     # align subfolder if there is a folder wrapper
     items = glob.glob(pj(project_folder,'*'))
+    items = [item for item in items if os.path.basename(item)!='__MACOSX']
     if len(glob.glob(pj(project_folder,'*.tex'))) == 0 and len(items) == 1:
         if os.path.isdir(items[0]): project_folder = items[0]

@@ -87,6 +88,9 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
         target_file = pj(translation_dir, 'translate_zh.pdf')
         if os.path.exists(target_file):
             promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot)
+            target_file_compare = pj(translation_dir, 'comparison.pdf')
+            if os.path.exists(target_file_compare):
+                promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot)
             return target_file
         return False
     def is_float(s):
@@ -100,7 +104,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
     if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]):  # is arxiv ID
         txt = 'https://arxiv.org/abs/' + txt[:10]
     if not txt.startswith('https://arxiv.org'):
-        return txt, None
+        return txt, None  # 是本地文件,跳过下载

     # <-------------- inspect format ------------->
     chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
@@ -138,11 +142,11 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
         from toolbox import extract_archive
         extract_archive(file_path=dst, dest_dir=extract_dst)
         return extract_dst, arxiv_id
-# ========================================= 插件主程序1 =====================================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=


 @CatchException
-def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # <-------------- information about this plugin ------------->
     chatbot.append([ "函数插件功能?",
         "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
@@ -214,11 +218,10 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
     # <-------------- we are done ------------->
     return success

-
-# ========================================= 插件主程序2 =====================================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

 @CatchException
-def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # <-------------- information about this plugin ------------->
     chatbot.append([
         "函数插件功能?",
@@ -247,7 +250,14 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,

     # <-------------- clear history and read input ------------->
     history = []
-    txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
+    try:
+        txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
+    except tarfile.ReadError as e:
+        yield from update_ui_lastest_msg(
+            "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
+            chatbot=chatbot, history=history)
+        return
+
     if txt.endswith('.pdf'):
         report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
         yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
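arxiv_download above treats the input as an arxiv ID when its first ten characters parse as a float and it contains a dot but no slash, rewriting it into an abs URL; anything that is not an arxiv.org link is returned untouched as a local path. A small sketch of that dispatch, using hypothetical helper names; the ID-extraction step at the end is an assumption for illustration:

```python
# Sketch of the input dispatch used by arxiv_download (illustrative only).
def is_float(s: str) -> bool:
    try:
        float(s)
        return True
    except ValueError:
        return False

def normalize_arxiv_input(txt: str):
    if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]):  # looks like "1812.10695"
        txt = 'https://arxiv.org/abs/' + txt[:10]
    if not txt.startswith('https://arxiv.org'):
        return txt, None  # local file or project path: skip downloading
    return txt, txt.rsplit('/', 1)[-1]  # assumption: treat the URL tail as the ID

print(normalize_arxiv_input("1812.10695"))
# -> ('https://arxiv.org/abs/1812.10695', '1812.10695')
```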
@@ -35,7 +35,11 @@ def gpt_academic_generate_oai_reply(
 class AutoGenGeneral(PluginMultiprocessManager):
     def gpt_academic_print_override(self, user_proxy, message, sender):
         # ⭐⭐ run in subprocess
-        self.child_conn.send(PipeCom("show", sender.name + "\n\n---\n\n" + message["content"]))
+        try:
+            print_msg = sender.name + "\n\n---\n\n" + message["content"]
+        except:
+            print_msg = sender.name + "\n\n---\n\n" + message
+        self.child_conn.send(PipeCom("show", print_msg))

     def gpt_academic_get_human_input(self, user_proxy, message):
         # ⭐⭐ run in subprocess
@@ -62,33 +66,33 @@ class AutoGenGeneral(PluginMultiprocessManager):
     def exe_autogen(self, input):
         # ⭐⭐ run in subprocess
         input = input.content
-        with ProxyNetworkActivate("AutoGen"):
-            code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
-            agents = self.define_agents()
-            user_proxy = None
-            assistant = None
-            for agent_kwargs in agents:
-                agent_cls = agent_kwargs.pop('cls')
-                kwargs = {
-                    'llm_config':self.llm_kwargs,
-                    'code_execution_config':code_execution_config
-                }
-                kwargs.update(agent_kwargs)
-                agent_handle = agent_cls(**kwargs)
-                agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b)
-                for d in agent_handle._reply_func_list:
-                    if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
-                        d['reply_func'] = gpt_academic_generate_oai_reply
-                if agent_kwargs['name'] == 'user_proxy':
-                    agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
-                    user_proxy = agent_handle
-                if agent_kwargs['name'] == 'assistant': assistant = agent_handle
-            try:
-                if user_proxy is None or assistant is None: raise Exception("用户代理或助理代理未定义")
+        code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
+        agents = self.define_agents()
+        user_proxy = None
+        assistant = None
+        for agent_kwargs in agents:
+            agent_cls = agent_kwargs.pop('cls')
+            kwargs = {
+                'llm_config':self.llm_kwargs,
+                'code_execution_config':code_execution_config
+            }
+            kwargs.update(agent_kwargs)
+            agent_handle = agent_cls(**kwargs)
+            agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b)
+            for d in agent_handle._reply_func_list:
+                if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
+                    d['reply_func'] = gpt_academic_generate_oai_reply
+            if agent_kwargs['name'] == 'user_proxy':
+                agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
+                user_proxy = agent_handle
+            if agent_kwargs['name'] == 'assistant': assistant = agent_handle
+        try:
+            if user_proxy is None or assistant is None: raise Exception("用户代理或助理代理未定义")
+            with ProxyNetworkActivate("AutoGen"):
                 user_proxy.initiate_chat(assistant, message=input)
         except Exception as e:
             tb_str = '```\n' + trimmed_format_exc() + '```'
             self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))

     def subprocess_worker(self, child_conn):
         # ⭐⭐ run in subprocess
@@ -9,7 +9,7 @@ class PipeCom:


 class PluginMultiprocessManager:
-    def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
         # ⭐ run in main process
         self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str())
         self.previous_work_dir_files = {}
@@ -18,7 +18,7 @@ class PluginMultiprocessManager:
         self.chatbot = chatbot
         self.history = history
         self.system_prompt = system_prompt
-        # self.web_port = web_port
+        # self.user_request = user_request
         self.alive = True
         self.use_docker = get_conf("AUTOGEN_USE_DOCKER")
         self.last_user_input = ""
@@ -32,7 +32,7 @@ def string_to_options(arguments):
     return args

 @CatchException
-def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -40,7 +40,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     history = []    # 清空历史,以免输入溢出
     chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
@@ -80,7 +80,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst


 @CatchException
-def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -88,7 +88,7 @@ def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     import subprocess
     history = []    # 清空历史,以免输入溢出
@@ -1,4 +1,4 @@
-from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder
+from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton
 import threading
 import os
 import logging
@@ -92,7 +92,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
             # 【选择处理】 尝试计算比例,尽可能多地保留文本
             from toolbox import get_reduce_token_percent
             p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
-            MAX_TOKEN = 4096
+            MAX_TOKEN = get_max_token(llm_kwargs)
             EXCEED_ALLO = 512 + 512 * exceeded_cnt
             inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
             mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
@@ -139,6 +139,8 @@ def can_multi_process(llm):
     if llm.startswith('gpt-'): return True
     if llm.startswith('api2d-'): return True
     if llm.startswith('azure-'): return True
+    if llm.startswith('spark'): return True
+    if llm.startswith('zhipuai'): return True
     return False

 def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
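For reference, `get_max_token` replaces the hard-coded 4096 with a per-model lookup, so clipping now respects each model's actual context window. A plausible sketch of what such a helper does (the real one lives in `toolbox`; the `max_token` field mirrors the `model_info` registry used elsewhere in this diff):

```python
from request_llms.bridge_all import model_info

def get_max_token(llm_kwargs):
    # Look up the context budget of the currently selected model
    # instead of assuming every model caps out at 4096 tokens.
    return model_info[llm_kwargs['llm_model']]['max_token']
```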
@@ -224,7 +226,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                 # 【选择处理】 尝试计算比例,尽可能多地保留文本
                 from toolbox import get_reduce_token_percent
                 p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
-                MAX_TOKEN = 4096
+                MAX_TOKEN = get_max_token(llm_kwargs)
                 EXCEED_ALLO = 512 + 512 * exceeded_cnt
                 inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
                 gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
@@ -282,8 +284,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
             # 在前端打印些好玩的东西
             for thread_index, _ in enumerate(worker_done):
                 print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
-                    replace('\n', '').replace('`', '.').replace(
-                        ' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
+                    replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
                 observe_win.append(print_something_really_funny)
             # 在前端打印些好玩的东西
             stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
@@ -312,95 +313,6 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
     return gpt_response_collection


-def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
-    def cut(txt_tocut, must_break_at_empty_line):  # 递归
-        if get_token_fn(txt_tocut) <= limit:
-            return [txt_tocut]
-        else:
-            lines = txt_tocut.split('\n')
-            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
-            estimated_line_cut = int(estimated_line_cut)
-            for cnt in reversed(range(estimated_line_cut)):
-                if must_break_at_empty_line:
-                    if lines[cnt] != "":
-                        continue
-                print(cnt)
-                prev = "\n".join(lines[:cnt])
-                post = "\n".join(lines[cnt:])
-                if get_token_fn(prev) < limit:
-                    break
-            if cnt == 0:
-                raise RuntimeError("存在一行极长的文本!")
-            # print(len(post))
-            # 列表递归接龙
-            result = [prev]
-            result.extend(cut(post, must_break_at_empty_line))
-            return result
-    try:
-        return cut(txt, must_break_at_empty_line=True)
-    except RuntimeError:
-        return cut(txt, must_break_at_empty_line=False)
-
-
-def force_breakdown(txt, limit, get_token_fn):
-    """
-    当无法用标点、空行分割时,我们用最暴力的方法切割
-    """
-    for i in reversed(range(len(txt))):
-        if get_token_fn(txt[:i]) < limit:
-            return txt[:i], txt[i:]
-    return "Tiktoken未知错误", "Tiktoken未知错误"
-
-
-def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
-    # 递归
-    def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
-        if get_token_fn(txt_tocut) <= limit:
-            return [txt_tocut]
-        else:
-            lines = txt_tocut.split('\n')
-            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
-            estimated_line_cut = int(estimated_line_cut)
-            cnt = 0
-            for cnt in reversed(range(estimated_line_cut)):
-                if must_break_at_empty_line:
-                    if lines[cnt] != "":
-                        continue
-                prev = "\n".join(lines[:cnt])
-                post = "\n".join(lines[cnt:])
-                if get_token_fn(prev) < limit:
-                    break
-            if cnt == 0:
-                if break_anyway:
-                    prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
-                else:
-                    raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
-            # print(len(post))
-            # 列表递归接龙
-            result = [prev]
-            result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
-            return result
-    try:
-        # 第1次尝试,将双空行(\n\n)作为切分点
-        return cut(txt, must_break_at_empty_line=True)
-    except RuntimeError:
-        try:
-            # 第2次尝试,将单空行(\n)作为切分点
-            return cut(txt, must_break_at_empty_line=False)
-        except RuntimeError:
-            try:
-                # 第3次尝试,将英文句号(.)作为切分点
-                res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False)  # 这个中文的句号是故意的,作为一个标识而存在
-                return [r.replace('。\n', '.') for r in res]
-            except RuntimeError as e:
-                try:
-                    # 第4次尝试,将中文句号(。)作为切分点
-                    res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
-                    return [r.replace('。。\n', '。') for r in res]
-                except RuntimeError as e:
-                    # 第5次尝试,没办法了,随便切一下敷衍吧
-                    return cut(txt, must_break_at_empty_line=False, break_anyway=True)
-
-
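The three helpers deleted above (the recursive cutter, `force_breakdown`, and the PDF variant) are superseded by a single entry point, `breakdown_text_to_satisfy_token_limit` in `crazy_functions/pdf_fns/breakdown_txt.py` — see the latex_fns hunk later in this diff. Its call shape, taken from that hunk (note the token-counting function is no longer passed in; it is resolved internally):

```python
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit

# Same contract as the removed functions: split `file_content` into segments
# that each fit within `max_token_limit` tokens.
segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
```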
 def read_and_clean_pdf_text(fp):
     """
@@ -553,6 +465,9 @@ def read_and_clean_pdf_text(fp):
             return True
         else:
             return False
+    # 对于某些PDF会有第一个段落就以小写字母开头,为了避免索引错误将其更改为大写
+    if starts_with_lowercase_word(meta_txt[0]):
+        meta_txt[0] = meta_txt[0].capitalize()
     for _ in range(100):
         for index, block_txt in enumerate(meta_txt):
             if starts_with_lowercase_word(block_txt):
@@ -631,90 +546,6 @@ def get_files_from_everything(txt, type): # type='.md'


-def Singleton(cls):
-    _instance = {}
-
-    def _singleton(*args, **kargs):
-        if cls not in _instance:
-            _instance[cls] = cls(*args, **kargs)
-        return _instance[cls]
-
-    return _singleton
-
-
-@Singleton
-class knowledge_archive_interface():
-    def __init__(self) -> None:
-        self.threadLock = threading.Lock()
-        self.current_id = ""
-        self.kai_path = None
-        self.qa_handle = None
-        self.text2vec_large_chinese = None
-
-    def get_chinese_text2vec(self):
-        if self.text2vec_large_chinese is None:
-            # < -------------------预热文本向量化模组--------------- >
-            from toolbox import ProxyNetworkActivate
-            print('Checking Text2vec ...')
-            from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-            with ProxyNetworkActivate('Download_LLM'):  # 临时地激活代理网络
-                self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
-
-        return self.text2vec_large_chinese
-
-    def feed_archive(self, file_manifest, id="default"):
-        self.threadLock.acquire()
-        # import uuid
-        self.current_id = id
-        from zh_langchain import construct_vector_store
-        self.qa_handle, self.kai_path = construct_vector_store(
-            vs_id=self.current_id,
-            files=file_manifest,
-            sentence_size=100,
-            history=[],
-            one_conent="",
-            one_content_segmentation="",
-            text2vec = self.get_chinese_text2vec(),
-        )
-        self.threadLock.release()
-
-    def get_current_archive_id(self):
-        return self.current_id
-
-    def get_loaded_file(self):
-        return self.qa_handle.get_loaded_file()
-
-    def answer_with_archive_by_id(self, txt, id):
-        self.threadLock.acquire()
-        if not self.current_id == id:
-            self.current_id = id
-            from zh_langchain import construct_vector_store
-            self.qa_handle, self.kai_path = construct_vector_store(
-                vs_id=self.current_id,
-                files=[],
-                sentence_size=100,
-                history=[],
-                one_conent="",
-                one_content_segmentation="",
-                text2vec = self.get_chinese_text2vec(),
-            )
-        VECTOR_SEARCH_SCORE_THRESHOLD = 0
-        VECTOR_SEARCH_TOP_K = 4
-        CHUNK_SIZE = 512
-        resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
-            query = txt,
-            vs_path = self.kai_path,
-            score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
-            vector_search_top_k=VECTOR_SEARCH_TOP_K,
-            chunk_conent=True,
-            chunk_size=CHUNK_SIZE,
-            text2vec = self.get_chinese_text2vec(),
-        )
-        self.threadLock.release()
-        return resp, prompt
-
 @Singleton
 class nougat_interface():
     def __init__(self):
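The local `Singleton` helper deleted here is not gone from the project: the `@@ -1,4 +1,4 @@` import hunk above now pulls `Singleton` from `toolbox`, and `nougat_interface` keeps using it. Its effect, for reference (`Cache` is an illustrative class, not from this repo):

```python
@Singleton
class Cache:
    def __init__(self):
        print("built once")   # runs only on the first call

a, b = Cache(), Cache()       # prints "built once" a single time
assert a is b                 # both names refer to the same instance
```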
@@ -0,0 +1,122 @@
+import os
+from textwrap import indent
+
+class FileNode:
+    def __init__(self, name):
+        self.name = name
+        self.children = []
+        self.is_leaf = False
+        self.level = 0
+        self.parenting_ship = []
+        self.comment = ""
+        self.comment_maxlen_show = 50
+
+    @staticmethod
+    def add_linebreaks_at_spaces(string, interval=10):
+        return '\n'.join(string[i:i+interval] for i in range(0, len(string), interval))
+
+    def sanitize_comment(self, comment):
+        if len(comment) > self.comment_maxlen_show: suf = '...'
+        else: suf = ''
+        comment = comment[:self.comment_maxlen_show]
+        comment = comment.replace('\"', '').replace('`', '').replace('\n', '').replace('`', '').replace('$', '')
+        comment = self.add_linebreaks_at_spaces(comment, 10)
+        return '`' + comment + suf + '`'
+
+    def add_file(self, file_path, file_comment):
+        directory_names, file_name = os.path.split(file_path)
+        current_node = self
+        level = 1
+        if directory_names == "":
+            new_node = FileNode(file_name)
+            current_node.children.append(new_node)
+            new_node.is_leaf = True
+            new_node.comment = self.sanitize_comment(file_comment)
+            new_node.level = level
+            current_node = new_node
+        else:
+            dnamesplit = directory_names.split(os.sep)
+            for i, directory_name in enumerate(dnamesplit):
+                found_child = False
+                level += 1
+                for child in current_node.children:
+                    if child.name == directory_name:
+                        current_node = child
+                        found_child = True
+                        break
+                if not found_child:
+                    new_node = FileNode(directory_name)
+                    current_node.children.append(new_node)
+                    new_node.level = level - 1
+                    current_node = new_node
+            term = FileNode(file_name)
+            term.level = level
+            term.comment = self.sanitize_comment(file_comment)
+            term.is_leaf = True
+            current_node.children.append(term)
+
+    def print_files_recursively(self, level=0, code="R0"):
+        print(' '*level + self.name + ' ' + str(self.is_leaf) + ' ' + str(self.level))
+        for j, child in enumerate(self.children):
+            child.print_files_recursively(level=level+1, code=code+str(j))
+            self.parenting_ship.extend(child.parenting_ship)
+            p1 = f"""{code}[\"🗎{self.name}\"]""" if self.is_leaf else f"""{code}[[\"📁{self.name}\"]]"""
+            p2 = """ --> """
+            p3 = f"""{code+str(j)}[\"🗎{child.name}\"]""" if child.is_leaf else f"""{code+str(j)}[[\"📁{child.name}\"]]"""
+            edge_code = p1 + p2 + p3
+            if edge_code in self.parenting_ship:
+                continue
+            self.parenting_ship.append(edge_code)
+        if self.comment != "":
+            pc1 = f"""{code}[\"🗎{self.name}\"]""" if self.is_leaf else f"""{code}[[\"📁{self.name}\"]]"""
+            pc2 = f""" -.-x """
+            pc3 = f"""C{code}[\"{self.comment}\"]:::Comment"""
+            edge_code = pc1 + pc2 + pc3
+            self.parenting_ship.append(edge_code)
+
+
+MERMAID_TEMPLATE = r"""
+```mermaid
+flowchart LR
+    %% <gpt_academic_hide_mermaid_code> 一个特殊标记,用于在生成mermaid图表时隐藏代码块
+    classDef Comment stroke-dasharray: 5 5
+    subgraph {graph_name}
+{relationship}
+    end
+```
+"""
+
+def build_file_tree_mermaid_diagram(file_manifest, file_comments, graph_name):
+    # Create the root node
+    file_tree_struct = FileNode("root")
+    # Build the tree structure
+    for file_path, file_comment in zip(file_manifest, file_comments):
+        file_tree_struct.add_file(file_path, file_comment)
+    file_tree_struct.print_files_recursively()
+    cc = "\n".join(file_tree_struct.parenting_ship)
+    ccc = indent(cc, prefix=" "*8)
+    return MERMAID_TEMPLATE.format(graph_name=graph_name, relationship=ccc)
+
+if __name__ == "__main__":
+    # File manifest
+    file_manifest = [
+        "cradle_void_terminal.ipynb",
+        "tests/test_utils.py",
+        "tests/test_plugins.py",
+        "tests/test_llms.py",
+        "config.py",
+        "build/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/model_weights_0.bin",
+        "crazy_functions/latex_fns/latex_actions.py",
+        "crazy_functions/latex_fns/latex_toolbox.py"
+    ]
+    file_comments = [
+        "根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件",
+        "包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器",
+        "用于构建HTML报告的类和方法用于构建HTML报告的类和方法用于构建HTML报告的类和方法",
+        "包含了用于文本切分的函数,以及处理PDF文件的示例代码包含了用于文本切分的函数,以及处理PDF文件的示例代码包含了用于文本切分的函数,以及处理PDF文件的示例代码",
+        "用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数",
+        "是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块",
+        "用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器",
+        "包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类",
+    ]
+    print(build_file_tree_mermaid_diagram(file_manifest, file_comments, "项目文件树"))
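For illustration, here is how the new helper is driven and roughly what it emits. The module path `crazy_functions/diagram_fns/file_tree.py` is inferred from the repo layout, not stated on this page, and the node codes in the sketch depend on insertion order:

```python
from crazy_functions.diagram_fns.file_tree import build_file_tree_mermaid_diagram

diagram = build_file_tree_mermaid_diagram(
    file_manifest=["config.py", "tests/test_llms.py"],
    file_comments=["全局配置", "LLM 测试"],
    graph_name="demo",
)
print(diagram)
# Expected shape of the output (abridged):
#   flowchart LR
#       classDef Comment stroke-dasharray: 5 5
#       subgraph demo
#           R0[["📁root"]] --> R00["🗎config.py"]
#           R00["🗎config.py"] -.-x CR00["`全局配置`"]:::Comment
#           ...
#       end
```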
@@ -0,0 +1,42 @@
+from toolbox import CatchException, update_ui, update_ui_lastest_msg
+from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from request_llms.bridge_all import predict_no_ui_long_connection
+from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
+import random
+
+
+class MiniGame_ASCII_Art(GptAcademicGameBaseState):
+    def step(self, prompt, chatbot, history):
+        if self.step_cnt == 0:
+            chatbot.append(["我画你猜(动物)", "请稍等..."])
+        else:
+            if prompt.strip() == 'exit':
+                self.delete_game = True
+                yield from update_ui_lastest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
+                return
+            chatbot.append([prompt, ""])
+        yield from update_ui(chatbot=chatbot, history=history)
+
+        if self.step_cnt == 0:
+            self.lock_plugin(chatbot)
+            self.cur_task = 'draw'
+
+        if self.cur_task == 'draw':
+            avail_obj = ["狗","猫","鸟","鱼","老鼠","蛇"]
+            self.obj = random.choice(avail_obj)
+            inputs = "I want to play a game called Guess the ASCII art. You can draw the ASCII art and I will try to guess it. " + \
+                f"This time you draw a {self.obj}. Note that you must not indicate what you have draw in the text, and you should only produce the ASCII art wrapped by ```. "
+            raw_res = predict_no_ui_long_connection(inputs=inputs, llm_kwargs=self.llm_kwargs, history=[], sys_prompt="")
+            self.cur_task = 'identify user guess'
+            res = get_code_block(raw_res)
+            history += ['', f'the answer is {self.obj}', inputs, res]
+            yield from update_ui_lastest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)
+
+        elif self.cur_task == 'identify user guess':
+            if is_same_thing(self.obj, prompt, self.llm_kwargs):
+                self.delete_game = True
+                yield from update_ui_lastest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
+            else:
+                self.cur_task = 'identify user guess'
+                yield from update_ui_lastest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)
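The game subclasses only implement `step`; judging from the call sites above, the `GptAcademicGameBaseState` base in `crazy_functions/multi_stage/multi_stage_utils.py` supplies at least the following contract. This is a sketch inferred from usage, not the actual base class:

```python
class GptAcademicGameBaseState:
    # Inferred contract: the plugin framework maintains these per session.
    step_cnt = 0          # incremented once per user turn
    delete_game = False   # a game sets this True to end the session
    cur_task = None       # free-form state tag driving the step() state machine

    def lock_plugin(self, chatbot):
        """Route every subsequent user input back into this game's step()."""
        ...
```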
@@ -0,0 +1,212 @@
+prompts_hs = """ 请以“{headstart}”为开头,编写一个小说的第一幕。
+
+- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。
+- 出现人物时,给出人物的名字。
+- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
+- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
+- 字数要求:第一幕的字数少于300字,且少于2个段落。
+"""
+
+prompts_interact = """ 小说的前文回顾:
+「
+{previously_on_story}
+」
+
+你是一个作家,根据以上的情节,给出4种不同的后续剧情发展方向,每个发展方向都精明扼要地用一句话说明。稍后,我将在这4个选择中,挑选一种剧情发展。
+
+输出格式例如:
+1. 后续剧情发展1
+2. 后续剧情发展2
+3. 后续剧情发展3
+4. 后续剧情发展4
+"""
+
+
+prompts_resume = """小说的前文回顾:
+「
+{previously_on_story}
+」
+
+你是一个作家,我们正在互相讨论,确定后续剧情的发展。
+在以下的剧情发展中,
+「
+{choice}
+」
+我认为更合理的是:{user_choice}。
+请在前文的基础上(不要重复前文),围绕我选定的剧情情节,编写小说的下一幕。
+
+- 禁止杜撰不符合我选择的剧情。
+- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。
+- 不要重复前文。
+- 出现人物时,给出人物的名字。
+- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
+- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
+- 小说的下一幕字数少于300字,且少于2个段落。
+"""
+
+
+prompts_terminate = """小说的前文回顾:
+「
+{previously_on_story}
+」
+
+你是一个作家,我们正在互相讨论,确定后续剧情的发展。
+现在,故事该结束了,我认为最合理的故事结局是:{user_choice}。
+
+请在前文的基础上(不要重复前文),编写小说的最后一幕。
+
+- 不要重复前文。
+- 出现人物时,给出人物的名字。
+- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
+- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
+- 字数要求:最后一幕的字数少于1000字。
+"""
+
+
+from toolbox import CatchException, update_ui, update_ui_lastest_msg
+from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from request_llms.bridge_all import predict_no_ui_long_connection
+from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
+import random
+
+
+class MiniGame_ResumeStory(GptAcademicGameBaseState):
+    story_headstart = [
+        '先行者知道,他现在是全宇宙中唯一的一个人了。',
+        '深夜,一个年轻人穿过天安门广场向纪念堂走去。在二十二世纪编年史中,计算机把他的代号定为M102。',
+        '他知道,这最后一课要提前讲了。又一阵剧痛从肝部袭来,几乎使他晕厥过去。',
+        '在距地球五万光年的远方,在银河系的中心,一场延续了两万年的星际战争已接近尾声。那里的太空中渐渐隐现出一个方形区域,仿佛灿烂的群星的背景被剪出一个方口。',
+        '伊依一行三人乘坐一艘游艇在南太平洋上做吟诗航行,他们的目的地是南极,如果几天后能顺利到达那里,他们将钻出地壳去看诗云。',
+        '很多人生来就会莫名其妙地迷上一样东西,仿佛他的出生就是要和这东西约会似的,正是这样,圆圆迷上了肥皂泡。'
+    ]
+
+    def begin_game_step_0(self, prompt, chatbot, history):
+        # init game at step 0
+        self.headstart = random.choice(self.story_headstart)
+        self.story = []
+        chatbot.append(["互动写故事", f"这次的故事开头是:{self.headstart}"])
+        self.sys_prompt_ = '你是一个想象力丰富的杰出作家。正在与你的朋友互动,一起写故事,因此你每次写的故事段落应少于300字(结局除外)。'
+
+    def generate_story_image(self, story_paragraph):
+        try:
+            from crazy_functions.图片生成 import gen_image
+            prompt_ = predict_no_ui_long_connection(inputs=story_paragraph, llm_kwargs=self.llm_kwargs, history=[], sys_prompt='你需要根据用户给出的小说段落,进行简短的环境描写。要求:80字以内。')
+            image_url, image_path = gen_image(self.llm_kwargs, prompt_, '512x512', model="dall-e-2", quality='standard', style='natural')
+            return f'<br/><div align="center"><img src="file={image_path}"></div>'
+        except:
+            return ''
+
+    def step(self, prompt, chatbot, history):
+        """
+        首先,处理游戏初始化等特殊情况
+        """
+        if self.step_cnt == 0:
+            self.begin_game_step_0(prompt, chatbot, history)
+            self.lock_plugin(chatbot)
+            self.cur_task = 'head_start'
+        else:
+            if prompt.strip() == 'exit' or prompt.strip() == '结束剧情':
+                # should we terminate game here?
+                self.delete_game = True
+                yield from update_ui_lastest_msg(lastmsg=f"游戏结束。", chatbot=chatbot, history=history, delay=0.)
+                return
+            if '剧情收尾' in prompt:
+                self.cur_task = 'story_terminate'
+            # # well, game resumes
+            # chatbot.append([prompt, ""])
+        # update ui, don't keep the user waiting
+        yield from update_ui(chatbot=chatbot, history=history)
+
+        """
+        处理游戏的主体逻辑
+        """
+        if self.cur_task == 'head_start':
+            """
+            这是游戏的第一步
+            """
+            inputs_ = prompts_hs.format(headstart=self.headstart)
+            history_ = []
+            story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs_, '故事开头', self.llm_kwargs,
+                chatbot, history_, self.sys_prompt_
+            )
+            self.story.append(story_paragraph)
+            # # 配图
+            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
+            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
+
+            # # 构建后续剧情引导
+            previously_on_story = ""
+            for s in self.story:
+                previously_on_story += s + '\n'
+            inputs_ = prompts_interact.format(previously_on_story=previously_on_story)
+            history_ = []
+            self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs_, '请在以下几种故事走向中,选择一种(当然,您也可以选择给出其他故事走向):', self.llm_kwargs,
+                chatbot,
+                history_,
+                self.sys_prompt_
+            )
+            self.cur_task = 'user_choice'
+
+        elif self.cur_task == 'user_choice':
+            """
+            根据用户的提示,确定故事的下一步
+            """
+            if '请在以下几种故事走向中,选择一种' in chatbot[-1][0]: chatbot.pop(-1)
+            previously_on_story = ""
+            for s in self.story:
+                previously_on_story += s + '\n'
+            inputs_ = prompts_resume.format(previously_on_story=previously_on_story, choice=self.next_choices, user_choice=prompt)
+            history_ = []
+            story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs_, f'下一段故事(您的选择是:{prompt})。', self.llm_kwargs,
+                chatbot, history_, self.sys_prompt_
+            )
+            self.story.append(story_paragraph)
+            # # 配图
+            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
+            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
+
+            # # 构建后续剧情引导
+            previously_on_story = ""
+            for s in self.story:
+                previously_on_story += s + '\n'
+            inputs_ = prompts_interact.format(previously_on_story=previously_on_story)
+            history_ = []
+            self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs_,
+                '请在以下几种故事走向中,选择一种。当然,您也可以给出您心中的其他故事走向。另外,如果您希望剧情立即收尾,请输入剧情走向,并以“剧情收尾”四个字提示程序。', self.llm_kwargs,
+                chatbot,
+                history_,
+                self.sys_prompt_
+            )
+            self.cur_task = 'user_choice'
+
+        elif self.cur_task == 'story_terminate':
+            """
+            根据用户的提示,确定故事的结局
+            """
+            previously_on_story = ""
+            for s in self.story:
+                previously_on_story += s + '\n'
+            inputs_ = prompts_terminate.format(previously_on_story=previously_on_story, user_choice=prompt)
+            history_ = []
+            story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs_, f'故事收尾(您的选择是:{prompt})。', self.llm_kwargs,
+                chatbot, history_, self.sys_prompt_
+            )
+            # # 配图
+            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
+            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
+
+            # terminate game
+            self.delete_game = True
+            return
@@ -0,0 +1,35 @@
+
+from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
+from request_llms.bridge_all import predict_no_ui_long_connection
+def get_code_block(reply):
+    import re
+    pattern = r"```([\s\S]*?)```"  # regex pattern to match code blocks
+    matches = re.findall(pattern, reply)  # find all code blocks in text
+    if len(matches) == 1:
+        return "```" + matches[0] + "```"  # code block
+    raise RuntimeError("GPT is not generating proper code.")
+
+def is_same_thing(a, b, llm_kwargs):
+    from pydantic import BaseModel, Field
+    class IsSameThing(BaseModel):
+        is_same_thing: bool = Field(description="determine whether two objects are same thing.", default=False)
+
+    def run_gpt_fn(inputs, sys_prompt, history=[]):
+        return predict_no_ui_long_connection(
+            inputs=inputs, llm_kwargs=llm_kwargs,
+            history=history, sys_prompt=sys_prompt, observe_window=[]
+        )
+
+    gpt_json_io = GptJsonIO(IsSameThing)
+    inputs_01 = "Identity whether the user input and the target is the same thing: \n target object: {a} \n user input object: {b} \n\n\n".format(a=a, b=b)
+    inputs_01 += "\n\n\n Note that the user may describe the target object with a different language, e.g. cat and 猫 are the same thing."
+    analyze_res_cot_01 = run_gpt_fn(inputs_01, "", [])
+
+    inputs_02 = inputs_01 + gpt_json_io.format_instructions
+    analyze_res = run_gpt_fn(inputs_02, "", [inputs_01, analyze_res_cot_01])
+
+    try:
+        res = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)
+        return res.is_same_thing
+    except JsonStringError as e:
+        return False
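Note the two-pass call in `is_same_thing`: a free-form reasoning pass first, then the same prompt with `format_instructions` appended for the structured JSON pass — a small "reason first, format second" idiom. Illustrative usage (`llm_kwargs` comes from the plugin runtime):

```python
from crazy_functions.game_fns.game_utils import is_same_thing

print(is_same_thing("猫", "cat", llm_kwargs))       # expected: True
print(is_same_thing("猫", "airplane", llm_kwargs))  # expected: False
```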
crazy_functions/ipc_fns/mp.py (normal file, 37 lines)
@@ -0,0 +1,37 @@
+import platform
+import pickle
+import multiprocessing
+
+def run_in_subprocess_wrapper_func(v_args):
+    func, args, kwargs, return_dict, exception_dict = pickle.loads(v_args)
+    import sys
+    try:
+        result = func(*args, **kwargs)
+        return_dict['result'] = result
+    except Exception as e:
+        exc_info = sys.exc_info()
+        exception_dict['exception'] = exc_info
+
+def run_in_subprocess_with_timeout(func, timeout=60):
+    if platform.system() == 'Linux':
+        def wrapper(*args, **kwargs):
+            return_dict = multiprocessing.Manager().dict()
+            exception_dict = multiprocessing.Manager().dict()
+            v_args = pickle.dumps((func, args, kwargs, return_dict, exception_dict))
+            process = multiprocessing.Process(target=run_in_subprocess_wrapper_func, args=(v_args,))
+            process.start()
+            process.join(timeout)
+            if process.is_alive():
+                process.terminate()
+                raise TimeoutError(f'功能单元{str(func)}未能在规定时间内完成任务')
+            process.close()
+            if 'exception' in exception_dict:
+                # ooops, the subprocess ran into an exception
+                exc_info = exception_dict['exception']
+                raise exc_info[1].with_traceback(exc_info[2])
+            if 'result' in return_dict.keys():
+                # If the subprocess ran successfully, return the result
+                return return_dict['result']
+        return wrapper
+    else:
+        return func
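A minimal usage sketch for the wrapper above: on Linux the call runs in a child process and a hang becomes a `TimeoutError`; on other platforms the original function is returned unchanged. `parse_pdf` is an illustrative stand-in:

```python
from crazy_functions.ipc_fns.mp import run_in_subprocess_with_timeout

def parse_pdf(path):
    ...  # slow / crash-prone work goes here

safe_parse_pdf = run_in_subprocess_with_timeout(parse_pdf, timeout=60)
result = safe_parse_pdf("paper.pdf")  # raises TimeoutError after 60 s on Linux
```

One design constraint worth noting: `func`, its arguments, and its return value all cross a process boundary via `pickle`, so they must be picklable.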
@@ -95,11 +95,14 @@ class LatexPaperSplit():
         self.abstract = "unknown"

     def read_title_and_abstract(self, txt):
-        title, abstract = find_title_and_abs(txt)
-        if title is not None:
-            self.title = title.replace('\n', ' ').replace('\\\\', ' ').replace('  ', ' ').replace('  ', ' ')
-        if abstract is not None:
-            self.abstract = abstract.replace('\n', ' ').replace('\\\\', ' ').replace('  ', ' ').replace('  ', ' ')
+        try:
+            title, abstract = find_title_and_abs(txt)
+            if title is not None:
+                self.title = title.replace('\n', ' ').replace('\\\\', ' ').replace('  ', ' ').replace('  ', ' ')
+            if abstract is not None:
+                self.abstract = abstract.replace('\n', ' ').replace('\\\\', ' ').replace('  ', ' ').replace('  ', ' ')
+        except:
+            pass

     def merge_result(self, arr, mode, msg, buggy_lines=[], buggy_line_surgery_n_lines=10):
         """
@@ -172,7 +175,6 @@ class LatexPaperFileGroup():
         self.sp_file_contents = []
         self.sp_file_index = []
         self.sp_file_tag = []
-
         # count_token
         from request_llms.bridge_all import model_info
         enc = model_info["gpt-3.5-turbo"]['tokenizer']
@@ -189,13 +191,12 @@ class LatexPaperFileGroup():
             self.sp_file_index.append(index)
             self.sp_file_tag.append(self.file_paths[index])
         else:
-            from ..crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-            segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
+            from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+            segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
             for j, segment in enumerate(segments):
                 self.sp_file_contents.append(segment)
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
-        print('Segmentation: done')

     def merge_result(self):
         self.file_result = ["" for _ in range(len(self.file_paths))]
@@ -265,12 +266,12 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin

     else:
         # <-------- gpt 多线程请求 ---------->
-        LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL')
         history_array = [[""] for _ in range(n_split)]
-        if LATEX_EXPERIMENTAL:
-            paper_meta = f"The paper you processing is `{lps.title}`, a part of the abstraction is `{lps.abstract}`"
-            paper_meta_max_len = 888
-            history_array = [[ paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"] for _ in range(n_split)]
+        # LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL')
+        # if LATEX_EXPERIMENTAL:
+        #     paper_meta = f"The paper you processing is `{lps.title}`, a part of the abstraction is `{lps.abstract}`"
+        #     paper_meta_max_len = 888
+        #     history_array = [[ paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"] for _ in range(n_split)]

     gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
         inputs_array=inputs_array,
@@ -401,7 +402,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
                 result_pdf = pj(work_folder_modified, f'merge_diff.pdf')    # get pdf path
                 promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot)  # promote file to web UI
             if modified_pdf_success:
-                yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 即将退出 ...', chatbot, history)    # 刷新Gradio前端界面
+                yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 正在尝试生成对比PDF, 请稍候 ...', chatbot, history)    # 刷新Gradio前端界面
                 result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf')  # get pdf path
                 origin_pdf = pj(work_folder_original, f'{main_file_original}.pdf')  # get pdf path
                 if os.path.exists(pj(work_folder, '..', 'translation')):
@@ -413,8 +414,11 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
                     from .latex_toolbox import merge_pdfs
                     concat_pdf = pj(work_folder_modified, f'comparison.pdf')
                     merge_pdfs(origin_pdf, result_pdf, concat_pdf)
+                    if os.path.exists(pj(work_folder, '..', 'translation')):
+                        shutil.copyfile(concat_pdf, pj(work_folder, '..', 'translation', 'comparison.pdf'))
                     promote_file_to_downloadzone(concat_pdf, rename_file=None, chatbot=chatbot)  # promote file to web UI
                 except Exception as e:
+                    print(e)
                     pass
                 return True # 成功啦
             else:
@@ -1,15 +1,18 @@
 import os, shutil
 import re
 import numpy as np
+
 PRESERVE = 0
 TRANSFORM = 1
+
 pj = os.path.join
-class LinkedListNode():
+
+
+class LinkedListNode:
     """
     Linked List Node
     """
+
     def __init__(self, string, preserve=True) -> None:
         self.string = string
         self.preserve = preserve
@@ -18,19 +21,22 @@ class LinkedListNode():
         # self.begin_line = 0
         # self.begin_char = 0

+
 def convert_to_linklist(text, mask):
     root = LinkedListNode("", preserve=True)
     current_node = root
     for c, m, i in zip(text, mask, range(len(text))):
-        if (m==PRESERVE and current_node.preserve) \
-            or (m==TRANSFORM and not current_node.preserve):
+        if (m == PRESERVE and current_node.preserve) or (
+            m == TRANSFORM and not current_node.preserve
+        ):
             # add
             current_node.string += c
         else:
-            current_node.next = LinkedListNode(c, preserve=(m==PRESERVE))
+            current_node.next = LinkedListNode(c, preserve=(m == PRESERVE))
             current_node = current_node.next
     return root

+
 def post_process(root):
     # 修复括号
     node = root
@@ -38,21 +44,24 @@ def post_process(root):
         string = node.string
         if node.preserve:
             node = node.next
-            if node is None: break
+            if node is None:
+                break
             continue
+
         def break_check(string):
             str_stack = [""]  # (lv, index)
             for i, c in enumerate(string):
-                if c == '{':
-                    str_stack.append('{')
-                elif c == '}':
+                if c == "{":
+                    str_stack.append("{")
+                elif c == "}":
                     if len(str_stack) == 1:
-                        print('stack fix')
+                        print("stack fix")
                         return i
                     str_stack.pop(-1)
                 else:
                     str_stack[-1] += c
             return -1
+
         bp = break_check(string)

         if bp == -1:
@@ -69,51 +78,66 @@ def post_process(root):
             node.next = q

         node = node.next
-        if node is None: break
+        if node is None:
+            break

     # 屏蔽空行和太短的句子
     node = root
     while True:
-        if len(node.string.strip('\n').strip(''))==0: node.preserve = True
-        if len(node.string.strip('\n').strip(''))<42: node.preserve = True
+        if len(node.string.strip("\n").strip("")) == 0:
+            node.preserve = True
+        if len(node.string.strip("\n").strip("")) < 42:
+            node.preserve = True
         node = node.next
-        if node is None: break
+        if node is None:
+            break
     node = root
     while True:
         if node.next and node.preserve and node.next.preserve:
            node.string += node.next.string
            node.next = node.next.next
         node = node.next
-        if node is None: break
+        if node is None:
+            break

     # 将前后断行符脱离
     node = root
     prev_node = None
     while True:
         if not node.preserve:
-            lstriped_ = node.string.lstrip().lstrip('\n')
-            if (prev_node is not None) and (prev_node.preserve) and (len(lstriped_)!=len(node.string)):
-                prev_node.string += node.string[:-len(lstriped_)]
+            lstriped_ = node.string.lstrip().lstrip("\n")
+            if (
+                (prev_node is not None)
+                and (prev_node.preserve)
+                and (len(lstriped_) != len(node.string))
+            ):
+                prev_node.string += node.string[: -len(lstriped_)]
                 node.string = lstriped_
-            rstriped_ = node.string.rstrip().rstrip('\n')
-            if (node.next is not None) and (node.next.preserve) and (len(rstriped_)!=len(node.string)):
-                node.next.string = node.string[len(rstriped_):] + node.next.string
+            rstriped_ = node.string.rstrip().rstrip("\n")
+            if (
+                (node.next is not None)
+                and (node.next.preserve)
+                and (len(rstriped_) != len(node.string))
+            ):
+                node.next.string = node.string[len(rstriped_) :] + node.next.string
                 node.string = rstriped_
-        # =====
+        # =-=-=
         prev_node = node
         node = node.next
-        if node is None: break
+        if node is None:
+            break

     # 标注节点的行数范围
     node = root
     n_line = 0
     expansion = 2
     while True:
-        n_l = node.string.count('\n')
-        node.range = [n_line-expansion, n_line+n_l+expansion]  # 失败时,扭转的范围
-        n_line = n_line+n_l
+        n_l = node.string.count("\n")
+        node.range = [n_line - expansion, n_line + n_l + expansion]  # 失败时,扭转的范围
+        n_line = n_line + n_l
         node = node.next
-        if node is None: break
+        if node is None:
+            break
     return root

@@ -131,12 +155,14 @@ def set_forbidden_text(text, mask, pattern, flags=0):
    you can mask out (mask = PRESERVE so that text become untouchable for GPT)
    everything between "\begin{equation}" and "\end{equation}"
    """
-    if isinstance(pattern, list): pattern = '|'.join(pattern)
+    if isinstance(pattern, list):
+        pattern = "|".join(pattern)
     pattern_compile = re.compile(pattern, flags)
     for res in pattern_compile.finditer(text):
-        mask[res.span()[0]:res.span()[1]] = PRESERVE
+        mask[res.span()[0] : res.span()[1]] = PRESERVE
     return text, mask

+
 def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
     """
     Move area out of preserve area (make text editable for GPT)
@@ -144,17 +170,19 @@ def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
     e.g.
     \begin{abstract} blablablablablabla. \end{abstract}
     """
-    if isinstance(pattern, list): pattern = '|'.join(pattern)
+    if isinstance(pattern, list):
+        pattern = "|".join(pattern)
     pattern_compile = re.compile(pattern, flags)
     for res in pattern_compile.finditer(text):
         if not forbid_wrapper:
-            mask[res.span()[0]:res.span()[1]] = TRANSFORM
+            mask[res.span()[0] : res.span()[1]] = TRANSFORM
         else:
-            mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE  # '\\begin{abstract}'
-            mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM  # abstract
-            mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE  # abstract
+            mask[res.regs[0][0] : res.regs[1][0]] = PRESERVE  # '\\begin{abstract}'
+            mask[res.regs[1][0] : res.regs[1][1]] = TRANSFORM  # abstract
+            mask[res.regs[1][1] : res.regs[0][1]] = PRESERVE  # abstract
     return text, mask

+
 def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
     """
     Add a preserve text area in this paper (text become untouchable for GPT).
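To see how these mask helpers compose: the file keeps a per-character mask (`PRESERVE = 0`, `TRANSFORM = 1`, defined in the first hunk of this file) alongside the raw LaTeX, and each helper flips spans of that mask. A small driver sketch — the pattern, dtype, and default fill are illustrative, not taken from a specific call site:

```python
import re
import numpy as np

text = r"\begin{equation} E=mc^2 \end{equation} Some prose to translate."
mask = np.zeros(len(text), dtype=np.uint8) + TRANSFORM  # everything editable at first

# Protect the equation block from GPT edits:
text, mask = set_forbidden_text(
    text, mask, r"\\begin\{equation\}.*?\\end\{equation\}", flags=re.DOTALL)

# mask is now PRESERVE over the equation and TRANSFORM elsewhere;
# convert_to_linklist(text, mask) then splits the document into
# preserve/transform nodes for the translation pass.
```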
@@ -166,16 +194,22 @@ def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
|
|||||||
for res in pattern_compile.finditer(text):
|
for res in pattern_compile.finditer(text):
|
||||||
brace_level = -1
|
brace_level = -1
|
||||||
p = begin = end = res.regs[0][0]
|
p = begin = end = res.regs[0][0]
|
||||||
for _ in range(1024*16):
|
for _ in range(1024 * 16):
|
||||||
if text[p] == '}' and brace_level == 0: break
|
if text[p] == "}" and brace_level == 0:
|
||||||
elif text[p] == '}': brace_level -= 1
|
break
|
||||||
elif text[p] == '{': brace_level += 1
|
elif text[p] == "}":
|
||||||
|
brace_level -= 1
|
||||||
|
elif text[p] == "{":
|
||||||
|
brace_level += 1
|
||||||
p += 1
|
p += 1
|
||||||
end = p+1
|
end = p + 1
|
||||||
mask[begin:end] = PRESERVE
|
mask[begin:end] = PRESERVE
|
||||||
return text, mask
|
return text, mask
|
||||||
|
|
||||||
def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wrapper=True):
|
|
||||||
|
def reverse_forbidden_text_careful_brace(
|
||||||
|
text, mask, pattern, flags=0, forbid_wrapper=True
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
Move area out of preserve area (make text editable for GPT)
|
Move area out of preserve area (make text editable for GPT)
|
||||||
count the number of the braces so as to catch compelete text area.
|
count the number of the braces so as to catch compelete text area.
|
||||||
@@ -186,39 +220,57 @@ def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wr
|
|||||||
for res in pattern_compile.finditer(text):
|
for res in pattern_compile.finditer(text):
|
||||||
brace_level = 0
|
brace_level = 0
|
||||||
p = begin = end = res.regs[1][0]
|
p = begin = end = res.regs[1][0]
|
||||||
for _ in range(1024*16):
|
for _ in range(1024 * 16):
|
||||||
if text[p] == '}' and brace_level == 0: break
|
if text[p] == "}" and brace_level == 0:
|
||||||
elif text[p] == '}': brace_level -= 1
|
break
|
||||||
elif text[p] == '{': brace_level += 1
|
elif text[p] == "}":
|
||||||
|
brace_level -= 1
|
||||||
|
elif text[p] == "{":
|
||||||
|
brace_level += 1
|
||||||
p += 1
|
p += 1
|
||||||
end = p
|
end = p
|
||||||
mask[begin:end] = TRANSFORM
|
mask[begin:end] = TRANSFORM
|
||||||
if forbid_wrapper:
|
if forbid_wrapper:
|
||||||
mask[res.regs[0][0]:begin] = PRESERVE
|
mask[res.regs[0][0] : begin] = PRESERVE
|
||||||
mask[end:res.regs[0][1]] = PRESERVE
|
mask[end : res.regs[0][1]] = PRESERVE
|
||||||
return text, mask
|
return text, mask
|
||||||
|
|
||||||
|
|
||||||
def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
|
def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
|
||||||
"""
|
"""
|
||||||
Find all \begin{} ... \end{} text block that with less than limit_n_lines lines.
|
Find all \begin{} ... \end{} text block that with less than limit_n_lines lines.
|
||||||
Add it to preserve area
|
Add it to preserve area
|
||||||
"""
|
"""
|
||||||
pattern_compile = re.compile(pattern, flags)
|
pattern_compile = re.compile(pattern, flags)
|
||||||
|
|
||||||
def search_with_line_limit(text, mask):
|
def search_with_line_limit(text, mask):
|
||||||
for res in pattern_compile.finditer(text):
|
for res in pattern_compile.finditer(text):
|
||||||
cmd = res.group(1) # begin{what}
|
cmd = res.group(1) # begin{what}
|
||||||
this = res.group(2) # content between begin and end
|
this = res.group(2) # content between begin and end
|
||||||
this_mask = mask[res.regs[2][0]:res.regs[2][1]]
|
this_mask = mask[res.regs[2][0] : res.regs[2][1]]
|
||||||
white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof',
|
white_list = [
|
||||||
'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate']
|
"document",
|
||||||
if (cmd in white_list) or this.count('\n') >= limit_n_lines: # use a magical number 42
|
"abstract",
|
||||||
|
"lemma",
|
||||||
|
"definition",
|
||||||
|
"sproof",
|
||||||
|
"em",
|
||||||
|
"emph",
|
||||||
|
"textit",
|
||||||
|
"textbf",
|
||||||
|
"itemize",
|
||||||
|
"enumerate",
|
||||||
|
]
|
||||||
|
if (cmd in white_list) or this.count(
|
||||||
|
"\n"
|
||||||
|
) >= limit_n_lines: # use a magical number 42
|
||||||
this, this_mask = search_with_line_limit(this, this_mask)
|
this, this_mask = search_with_line_limit(this, this_mask)
|
||||||
mask[res.regs[2][0]:res.regs[2][1]] = this_mask
|
mask[res.regs[2][0] : res.regs[2][1]] = this_mask
|
||||||
else:
|
else:
|
||||||
mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE
|
mask[res.regs[0][0] : res.regs[0][1]] = PRESERVE
|
||||||
return text, mask
|
return text, mask
|
||||||
return search_with_line_limit(text, mask)
|
|
||||||
|
|
||||||
|
return search_with_line_limit(text, mask)
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
@@ -227,6 +279,7 @@ Latex Merge File
|
|||||||
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
def find_main_tex_file(file_manifest, mode):
|
def find_main_tex_file(file_manifest, mode):
|
||||||
"""
|
"""
|
||||||
在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
|
在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
|
||||||
@@ -234,27 +287,36 @@ def find_main_tex_file(file_manifest, mode):
|
|||||||
"""
|
"""
|
||||||
canidates = []
|
canidates = []
|
||||||
for texf in file_manifest:
|
for texf in file_manifest:
|
||||||
if os.path.basename(texf).startswith('merge'):
|
if os.path.basename(texf).startswith("merge"):
|
||||||
continue
|
continue
|
||||||
with open(texf, 'r', encoding='utf8', errors='ignore') as f:
|
with open(texf, "r", encoding="utf8", errors="ignore") as f:
|
||||||
file_content = f.read()
|
file_content = f.read()
|
||||||
if r'\documentclass' in file_content:
|
if r"\documentclass" in file_content:
|
||||||
canidates.append(texf)
|
canidates.append(texf)
|
||||||
else:
|
else:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if len(canidates) == 0:
|
if len(canidates) == 0:
|
||||||
raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)')
|
raise RuntimeError("无法找到一个主Tex文件(包含documentclass关键字)")
|
||||||
elif len(canidates) == 1:
|
elif len(canidates) == 1:
|
||||||
return canidates[0]
|
return canidates[0]
|
||||||
else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
|
else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
|
||||||
canidates_score = []
|
canidates_score = []
|
||||||
# 给出一些判定模板文档的词作为扣分项
|
# 给出一些判定模板文档的词作为扣分项
|
||||||
unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
|
unexpected_words = [
|
||||||
expected_words = ['\input', '\ref', '\cite']
|
"\\LaTeX",
|
||||||
|
"manuscript",
|
||||||
|
"Guidelines",
|
||||||
|
"font",
|
||||||
|
"citations",
|
||||||
|
"rejected",
|
||||||
|
"blind review",
|
||||||
|
"reviewers",
|
||||||
|
]
|
||||||
|
expected_words = ["\\input", "\\ref", "\\cite"]
|
||||||
for texf in canidates:
|
for texf in canidates:
|
||||||
canidates_score.append(0)
|
canidates_score.append(0)
|
||||||
with open(texf, 'r', encoding='utf8', errors='ignore') as f:
|
with open(texf, "r", encoding="utf8", errors="ignore") as f:
|
||||||
file_content = f.read()
|
file_content = f.read()
|
||||||
file_content = rm_comments(file_content)
|
file_content = rm_comments(file_content)
|
||||||
for uw in unexpected_words:
|
for uw in unexpected_words:
|
||||||
@@ -263,9 +325,10 @@ def find_main_tex_file(file_manifest, mode):
|
|||||||
for uw in expected_words:
|
for uw in expected_words:
|
||||||
if uw in file_content:
|
if uw in file_content:
|
||||||
canidates_score[-1] += 1
|
canidates_score[-1] += 1
|
||||||
select = np.argmax(canidates_score) # 取评分最高者返回
|
select = np.argmax(canidates_score) # 取评分最高者返回
|
||||||
return canidates[select]
|
return canidates[select]
|
||||||
|
|
||||||
|
|
||||||
def rm_comments(main_file):
|
def rm_comments(main_file):
|
||||||
new_file_remove_comment_lines = []
|
new_file_remove_comment_lines = []
|
||||||
for l in main_file.splitlines():
|
for l in main_file.splitlines():
|
||||||
@@ -274,30 +337,39 @@ def rm_comments(main_file):
            pass
        else:
            new_file_remove_comment_lines.append(l)
    main_file = "\n".join(new_file_remove_comment_lines)
    # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file)  # convert \include commands into \input commands
    main_file = re.sub(r"(?<!\\)%.*", "", main_file)  # use a regex to find half-line comments and replace them with the empty string
    return main_file


def find_tex_file_ignore_case(fp):
    dir_name = os.path.dirname(fp)
    base_name = os.path.basename(fp)
    # if the given file path is already correct
    if os.path.isfile(pj(dir_name, base_name)):
        return pj(dir_name, base_name)
    # if not, try appending a .tex suffix
    if not base_name.endswith(".tex"):
        base_name += ".tex"
        if os.path.isfile(pj(dir_name, base_name)):
            return pj(dir_name, base_name)
    # if it is still missing, lift the case restriction and try again
    import glob

    for f in glob.glob(dir_name + "/*.tex"):
        base_name_s = os.path.basename(fp)
        base_name_f = os.path.basename(f)
        if base_name_s.lower() == base_name_f.lower():
            return f
        # also try appending a .tex suffix
        if not base_name_s.endswith(".tex"):
            base_name_s += ".tex"
            if base_name_s.lower() == base_name_f.lower():
                return f
    return None


def merge_tex_files_(project_foler, main_file, mode):
    """
    Merge Tex project recursively
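The half-line comment removal hinges on the negative lookbehind (?<!\\)%, which skips escaped percent signs. A quick illustration with an invented sample line:

import re

line = r"accuracy rose by 12\% % TODO: double-check this number"
print(re.sub(r"(?<!\\)%.*", "", line))
# -> 'accuracy rose by 12\% '  (the literal \% survives, the comment is stripped)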
@@ -309,18 +381,18 @@ def merge_tex_files_(project_foler, main_file, mode):
        fp_ = find_tex_file_ignore_case(fp)
        if fp_:
            try:
                with open(fp_, "r", encoding="utf-8", errors="replace") as fx:
                    c = fx.read()
            except:
                c = f"\n\nWarning from GPT-Academic: LaTex source file is missing!\n\n"
        else:
            raise RuntimeError(f"找不到{fp},Tex源文件缺失!")
        c = merge_tex_files_(project_foler, c, mode)
        main_file = main_file[: s.span()[0]] + c + main_file[s.span()[1] :]
    return main_file


def find_title_and_abs(main_file):

    def extract_abstract_1(text):
        pattern = r"\\abstract\{(.*?)\}"
        match = re.search(pattern, text, re.DOTALL)
@@ -352,6 +424,7 @@ def find_title_and_abs(main_file):
    title = extract_title(main_file)
    return title, abstract


def merge_tex_files(project_foler, main_file, mode):
    """
    Merge Tex project recursively
@@ -361,21 +434,30 @@ def merge_tex_files(project_foler, main_file, mode):
    main_file = merge_tex_files_(project_foler, main_file, mode)
    main_file = rm_comments(main_file)

    if mode == "translate_zh":
        # find paper documentclass
        pattern = re.compile(r"\\documentclass.*\n")
        match = pattern.search(main_file)
        assert match is not None, "Cannot find documentclass statement!"
        position = match.end()
        add_ctex = "\\usepackage{ctex}\n"
        add_url = "\\usepackage{url}\n" if "{url}" not in main_file else ""
        main_file = main_file[:position] + add_ctex + add_url + main_file[position:]
        # fontset=windows
        import platform

        main_file = re.sub(
            r"\\documentclass\[(.*?)\]{(.*?)}",
            r"\\documentclass[\1,fontset=windows,UTF8]{\2}",
            main_file,
        )
        main_file = re.sub(
            r"\\documentclass{(.*?)}",
            r"\\documentclass[fontset=windows,UTF8]{\1}",
            main_file,
        )
        # find paper abstract
        pattern_opt1 = re.compile(r"\\begin\{abstract\}.*\n")
        pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
        match_opt1 = pattern_opt1.search(main_file)
        match_opt2 = pattern_opt2.search(main_file)
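The two re.sub calls above handle both spellings of \documentclass, with and without an option list. A standalone check against an invented preamble:

import re

with_opts = r"\documentclass[11pt]{article}"
print(re.sub(r"\\documentclass\[(.*?)\]{(.*?)}",
             r"\\documentclass[\1,fontset=windows,UTF8]{\2}", with_opts))
# -> \documentclass[11pt,fontset=windows,UTF8]{article}

bare = r"\documentclass{report}"
print(re.sub(r"\\documentclass{(.*?)}",
             r"\\documentclass[fontset=windows,UTF8]{\1}", bare))
# -> \documentclass[fontset=windows,UTF8]{report}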
@@ -384,7 +466,9 @@ def merge_tex_files(project_foler, main_file, mode):
            main_file = insert_abstract(main_file)
        match_opt1 = pattern_opt1.search(main_file)
        match_opt2 = pattern_opt2.search(main_file)
        assert (match_opt1 is not None) or (
            match_opt2 is not None
        ), "Cannot find paper abstract section!"
    return main_file
@@ -394,6 +478,7 @@ The GPT-Academic program cannot find abstract section in this paper.
\end{abstract}
"""


def insert_abstract(tex_content):
    if "\\maketitle" in tex_content:
        # find the position of "\maketitle"
@@ -401,7 +486,13 @@ def insert_abstract(tex_content):
        # find the nearest ending line
        end_line_index = tex_content.find("\n", find_index)
        # insert "abs_str" on the next line
        modified_tex = (
            tex_content[: end_line_index + 1]
            + "\n\n"
            + insert_missing_abs_str
            + "\n\n"
            + tex_content[end_line_index + 1 :]
        )
        return modified_tex
    elif r"\begin{document}" in tex_content:
        # find the position of "\maketitle"
@@ -409,16 +500,25 @@ def insert_abstract(tex_content):
        # find the nearest ending line
        end_line_index = tex_content.find("\n", find_index)
        # insert "abs_str" on the next line
        modified_tex = (
            tex_content[: end_line_index + 1]
            + "\n\n"
            + insert_missing_abs_str
            + "\n\n"
            + tex_content[end_line_index + 1 :]
        )
        return modified_tex
    else:
        return tex_content


"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Post process
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
"""


def mod_inbraket(match):
    """
    Why does chatgpt replace the commas inside cite with full-width Chinese commas?
@@ -427,11 +527,12 @@ def mod_inbraket(match):
    cmd = match.group(1)
    str_to_modify = match.group(2)
    # modify the matched string
    str_to_modify = str_to_modify.replace(":", ":")  # full-width colon on the left, ASCII colon on the right
    str_to_modify = str_to_modify.replace(",", ",")  # full-width comma on the left, ASCII comma on the right
    # str_to_modify = 'BOOM'
    return "\\" + cmd + "{" + str_to_modify + "}"


def fix_content(final_tex, node_string):
    """
    Fix common GPT errors to increase success rate
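mod_inbraket is only ever applied through re.sub with a callable replacement, so it touches just the argument of short lowercase commands such as \cite{...}. A sketch of that call, with a fabricated input containing a full-width comma:

import re

broken = "see \\cite{smith2020" + chr(0xFF0C) + "jones2021} for details"  # chr(0xFF0C) is the full-width comma ','
fixed = re.sub(r"\\([a-z]{2,10})\{([^\}]*?)\}", mod_inbraket, string=broken)
print(fixed)  # -> see \cite{smith2020,jones2021} for details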
@@ -442,10 +543,10 @@ def fix_content(final_tex, node_string):
    final_tex = re.sub(r"\\([a-z]{2,10})\{([^\}]*?)\}", mod_inbraket, string=final_tex)

    if "Traceback" in final_tex and "[Local Message]" in final_tex:
        final_tex = node_string  # something went wrong, restore the original text
    if node_string.count("\\begin") != final_tex.count("\\begin"):
        final_tex = node_string  # something went wrong, restore the original text
    if node_string.count("\_") > 0 and node_string.count("\_") > final_tex.count("\_"):
        # walk and replace any _ without \
        final_tex = re.sub(r"(?<!\\)_", "\\_", final_tex)
@@ -453,24 +554,32 @@ def fix_content(final_tex, node_string):
        # this function counts the number of { and }
        brace_level = 0
        for c in string:
            if c == "{":
                brace_level += 1
            elif c == "}":
                brace_level -= 1
        return brace_level

    def join_most(tex_t, tex_o):
        # this function joins the translated string and the original string when something goes wrong
        p_t = 0
        p_o = 0

        def find_next(string, chars, begin):
            p = begin
            while p < len(string):
                if string[p] in chars:
                    return p, string[p]
                p += 1
            return None, None

        while True:
            res1, char = find_next(tex_o, ["{", "}"], p_o)
            if res1 is None:
                break
            res2, char = find_next(tex_t, [char], p_t)
            if res2 is None:
                break
            p_o = res1 + 1
            p_t = res2 + 1
        return tex_t[:p_t] + tex_o[p_o:]
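Together, count_brace_level and join_most give fix_content a cheap repair path: if the translated node unbalances the braces, splice the original tail back on at the last position where both strings still agree on brace structure. The balance test is just the running counter defined above:

assert count_brace_level(r"\textbf{ok}") == 0      # balanced
assert count_brace_level(r"\textbf{broken") == 1   # one unclosed brace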
@@ -480,9 +589,13 @@ def fix_content(final_tex, node_string):
        final_tex = join_most(final_tex, node_string)
    return final_tex


def compile_latex_with_timeout(command, cwd, timeout=60):
    import subprocess

    process = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
    )
    try:
        stdout, stderr = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
@@ -493,15 +606,51 @@ def compile_latex_with_timeout(command, cwd, timeout=60):
    return True


def run_in_subprocess_wrapper_func(func, args, kwargs, return_dict, exception_dict):
    import sys

    try:
        result = func(*args, **kwargs)
        return_dict["result"] = result
    except Exception as e:
        exc_info = sys.exc_info()
        exception_dict["exception"] = exc_info


def run_in_subprocess(func):
    import multiprocessing

    def wrapper(*args, **kwargs):
        return_dict = multiprocessing.Manager().dict()
        exception_dict = multiprocessing.Manager().dict()
        process = multiprocessing.Process(
            target=run_in_subprocess_wrapper_func,
            args=(func, args, kwargs, return_dict, exception_dict),
        )
        process.start()
        process.join()
        process.close()
        if "exception" in exception_dict:
            # ooops, the subprocess ran into an exception
            exc_info = exception_dict["exception"]
            raise exc_info[1].with_traceback(exc_info[2])
        if "result" in return_dict.keys():
            # If the subprocess ran successfully, return the result
            return return_dict["result"]

    return wrapper


-def merge_pdfs(pdf1_path, pdf2_path, output_path):
-    import PyPDF2
+def _merge_pdfs(pdf1_path, pdf2_path, output_path):
+    import PyPDF2  # PyPDF2 has a serious memory-leak problem; it is run in a subprocess so the memory can actually be released
     Percent = 0.95
+    # raise RuntimeError('PyPDF2 has a serious memory leak problem, please use other tools to merge PDF files.')
     # Open the first PDF file
     with open(pdf1_path, "rb") as pdf1_file:
         pdf1_reader = PyPDF2.PdfFileReader(pdf1_file)
     # Open the second PDF file
     with open(pdf2_path, "rb") as pdf2_file:
         pdf2_reader = PyPDF2.PdfFileReader(pdf2_file)
     # Create a new PDF file to store the merged pages
     output_writer = PyPDF2.PdfFileWriter()
@@ -521,12 +670,25 @@ def merge_pdfs(pdf1_path, pdf2_path, output_path):
        page2 = PyPDF2.PageObject.createBlankPage(pdf1_reader)
    # Create a new empty page with double width
    new_page = PyPDF2.PageObject.createBlankPage(
        width=int(
            int(page1.mediaBox.getWidth())
            + int(page2.mediaBox.getWidth()) * Percent
        ),
        height=max(page1.mediaBox.getHeight(), page2.mediaBox.getHeight()),
    )
    new_page.mergeTranslatedPage(page1, 0, 0)
    new_page.mergeTranslatedPage(
        page2,
        int(
            int(page1.mediaBox.getWidth())
            - int(page2.mediaBox.getWidth()) * (1 - Percent)
        ),
        0,
    )
    output_writer.addPage(new_page)
    # Save the merged PDF file
    with open(output_path, "wb") as output_file:
        output_writer.write(output_file)


merge_pdfs = run_in_subprocess(_merge_pdfs)  # PyPDF2 has a serious memory leak; running it in a subprocess makes it easy to release the memory
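run_in_subprocess is a general-purpose wrapper, not PyPDF2-specific: the callee runs in a fresh process, the result travels back through a Manager dict, and any exception is re-raised in the parent with its original traceback. A hypothetical use (leaky_job is invented for illustration):

def leaky_job(n):
    # pretend this allocates memory that some library never frees
    return sum(range(n))

# on platforms that spawn processes (Windows, macOS), call this under an
# `if __name__ == "__main__":` guard so the module can be re-imported safely
safe_job = run_in_subprocess(leaky_job)
print(safe_job(10))  # -> 45, computed in a child process that then exits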
@@ -1,6 +1,7 @@
 from pydantic import BaseModel, Field
 from typing import List
 from toolbox import update_ui_lastest_msg, disable_auto_promotion
+from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
 from request_llms.bridge_all import predict_no_ui_long_connection
 from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
 import time
@@ -21,11 +22,7 @@ class GptAcademicState():
     def reset(self):
         pass

-    def lock_plugin(self, chatbot):
-        chatbot._cookies['plugin_state'] = pickle.dumps(self)
-
-    def unlock_plugin(self, chatbot):
-        self.reset()
+    def dump_state(self, chatbot):
         chatbot._cookies['plugin_state'] = pickle.dumps(self)

     def set_state(self, chatbot, key, value):
@@ -40,6 +37,57 @@ class GptAcademicState():
         state.chatbot = chatbot
         return state

-class GatherMaterials():
-    def __init__(self, materials) -> None:
-        materials = ['image', 'prompt']
+class GptAcademicGameBaseState():
+    """
+    1. first init: __init__ ->
+    """
+    def init_game(self, chatbot, lock_plugin):
+        self.plugin_name = None
+        self.callback_fn = None
+        self.delete_game = False
+        self.step_cnt = 0
+
+    def lock_plugin(self, chatbot):
+        if self.callback_fn is None:
+            raise ValueError("callback_fn is None")
+        chatbot._cookies['lock_plugin'] = self.callback_fn
+        self.dump_state(chatbot)
+
+    def get_plugin_name(self):
+        if self.plugin_name is None:
+            raise ValueError("plugin_name is None")
+        return self.plugin_name
+
+    def dump_state(self, chatbot):
+        chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = pickle.dumps(self)
+
+    def set_state(self, chatbot, key, value):
+        setattr(self, key, value)
+        chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = pickle.dumps(self)
+
+    @staticmethod
+    def sync_state(chatbot, llm_kwargs, cls, plugin_name, callback_fn, lock_plugin=True):
+        state = chatbot._cookies.get(f'plugin_state/{plugin_name}', None)
+        if state is not None:
+            state = pickle.loads(state)
+        else:
+            state = cls()
+            state.init_game(chatbot, lock_plugin)
+        state.plugin_name = plugin_name
+        state.llm_kwargs = llm_kwargs
+        state.chatbot = chatbot
+        state.callback_fn = callback_fn
+        return state
+
+    def continue_game(self, prompt, chatbot, history):
+        # main body of the game
+        yield from self.step(prompt, chatbot, history)
+        self.step_cnt += 1
+        # save state and wrap up
+        self.dump_state(chatbot)
+        # if the game is over, clean up
+        if self.delete_game:
+            chatbot._cookies['lock_plugin'] = None
+            chatbot._cookies[f'plugin_state/{self.get_plugin_name()}'] = None
+        yield from update_ui(chatbot=chatbot, history=history)
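GptAcademicGameBaseState keys its pickled state by plugin name, so two games can coexist in one session without clobbering each other's cookies. The round trip is plain pickle over a dict-like store; a minimal sketch (cookies stands in for chatbot._cookies, and the dict stands in for the game object):

import pickle

cookies = {}  # stands in for chatbot._cookies
game_state = {"step_cnt": 3, "delete_game": False}  # stands in for the game object
cookies['plugin_state/MiniGame_ResumeStory'] = pickle.dumps(game_state)
restored = pickle.loads(cookies['plugin_state/MiniGame_ResumeStory'])
print(restored["step_cnt"])  # -> 3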
@@ -0,0 +1,125 @@
from crazy_functions.ipc_fns.mp import run_in_subprocess_with_timeout

def force_breakdown(txt, limit, get_token_fn):
    """ When neither punctuation nor blank lines can be used to split, fall back to the most brute-force cut
    """
    for i in reversed(range(len(txt))):
        if get_token_fn(txt[:i]) < limit:
            return txt[:i], txt[i:]
    return "Tiktoken未知错误", "Tiktoken未知错误"


def maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage):
    """ To speed up the computation we use a special trick: when remain_txt_to_cut > `_max`, move the text beyond _max into remain_txt_to_cut_storage;
    when remain_txt_to_cut < `_min`, take part of the text back out of remain_txt_to_cut_storage
    """
    _min = int(5e4)
    _max = int(1e5)
    # print(len(remain_txt_to_cut), len(remain_txt_to_cut_storage))
    if len(remain_txt_to_cut) < _min and len(remain_txt_to_cut_storage) > 0:
        remain_txt_to_cut = remain_txt_to_cut + remain_txt_to_cut_storage
        remain_txt_to_cut_storage = ""
    if len(remain_txt_to_cut) > _max:
        remain_txt_to_cut_storage = remain_txt_to_cut[_max:] + remain_txt_to_cut_storage
        remain_txt_to_cut = remain_txt_to_cut[:_max]
    return remain_txt_to_cut, remain_txt_to_cut_storage


def cut(limit, get_token_fn, txt_tocut, must_break_at_empty_line, break_anyway=False):
    """ Text segmentation
    """
    res = []
    total_len = len(txt_tocut)
    fin_len = 0
    remain_txt_to_cut = txt_tocut
    remain_txt_to_cut_storage = ""
    # to speed things up, park everything beyond `_max` in remain_txt_to_cut_storage
    remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage)

    while True:
        if get_token_fn(remain_txt_to_cut) <= limit:
            # if the remaining text already fits within the token limit, no more cutting is needed
            res.append(remain_txt_to_cut); fin_len+=len(remain_txt_to_cut)
            break
        else:
            # otherwise, cut it
            lines = remain_txt_to_cut.split('\n')

            # estimate a cut point
            estimated_line_cut = limit / get_token_fn(remain_txt_to_cut) * len(lines)
            estimated_line_cut = int(estimated_line_cut)

            # search backwards for a suitable cut offset (cnt)
            cnt = 0
            for cnt in reversed(range(estimated_line_cut)):
                if must_break_at_empty_line:
                    # first try a double blank line (\n\n) as the cut point
                    if lines[cnt] != "":
                        continue
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break

            if cnt == 0:
                # no suitable cut point was found
                if break_anyway:
                    # brute-force cutting is allowed
                    prev, post = force_breakdown(remain_txt_to_cut, limit, get_token_fn)
                else:
                    # not allowed, raise an error instead
                    raise RuntimeError(f"存在一行极长的文本!{remain_txt_to_cut}")

            # append to the result list
            res.append(prev); fin_len+=len(prev)
            # prepare the next iteration
            remain_txt_to_cut = post
            remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage)
            process = fin_len/total_len
            print(f'正在文本切分 {int(process*100)}%')
            if len(remain_txt_to_cut.strip()) == 0:
                break
    return res


def breakdown_text_to_satisfy_token_limit_(txt, limit, llm_model="gpt-3.5-turbo"):
    """ Try several splitting strategies in turn until the text satisfies the token limit
    """
    from request_llms.bridge_all import model_info
    enc = model_info[llm_model]['tokenizer']
    def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
    try:
        # 1st attempt: use double blank lines (\n\n) as the cut point
        return cut(limit, get_token_fn, txt, must_break_at_empty_line=True)
    except RuntimeError:
        try:
            # 2nd attempt: use single newlines (\n) as the cut point
            return cut(limit, get_token_fn, txt, must_break_at_empty_line=False)
        except RuntimeError:
            try:
                # 3rd attempt: use the English period (.) as the cut point
                res = cut(limit, get_token_fn, txt.replace('.', '。\n'), must_break_at_empty_line=False)  # the Chinese full stop here is deliberate; it serves as a marker
                return [r.replace('。\n', '.') for r in res]
            except RuntimeError as e:
                try:
                    # 4th attempt: use the Chinese full stop (。) as the cut point
                    res = cut(limit, get_token_fn, txt.replace('。', '。。\n'), must_break_at_empty_line=False)
                    return [r.replace('。。\n', '。') for r in res]
                except RuntimeError as e:
                    # 5th attempt: out of options, just cut anywhere
                    return cut(limit, get_token_fn, txt, must_break_at_empty_line=False, break_anyway=True)


breakdown_text_to_satisfy_token_limit = run_in_subprocess_with_timeout(breakdown_text_to_satisfy_token_limit_, timeout=60)

if __name__ == '__main__':
    from crazy_functions.crazy_utils import read_and_clean_pdf_text
    file_content, page_one = read_and_clean_pdf_text("build/assets/at.pdf")

    from request_llms.bridge_all import model_info
    for i in range(5):
        file_content += file_content

    print(len(file_content))
    TOKEN_LIMIT_PER_FRAGMENT = 2500
    res = breakdown_text_to_satisfy_token_limit(file_content, TOKEN_LIMIT_PER_FRAGMENT)
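The five fallbacks above degrade gracefully from double blank lines down to brute-force slicing. The same cascade idea in a self-contained sketch, with a character count standing in for the tokenizer so it runs without request_llms:

sample = "para one\n\npara two\n\n" + "x" * 50
limit = 30

def get_token_fn(txt):
    return len(txt)  # stand-in tokenizer: one token per character

# try double newline, then single newline, then brute-force slicing
for strategy in ("\n\n", "\n", None):
    pieces = (sample.split(strategy) if strategy
              else [sample[i:i + limit] for i in range(0, len(sample), limit)])
    if all(get_token_fn(p) <= limit for p in pieces):
        break
print([len(p) for p in pieces])  # -> [30, 30, 10], only the brute-force pass fits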
@@ -74,7 +74,7 @@ def produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chat

 def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG):
     from crazy_functions.pdf_fns.report_gen_html import construct_html
-    from crazy_functions.crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
+    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
     from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
     from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

@@ -116,7 +116,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi
         # find a smooth token limit to achieve even separation
         count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT))
         token_limit_smooth = raw_token_num // count + count
-        return breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn=get_token_num, limit=token_limit_smooth)
+        return breakdown_text_to_satisfy_token_limit(txt, limit=token_limit_smooth, llm_model=llm_kwargs['llm_model'])

     for section in article_dict.get('sections'):
         if len(section['text']) == 0: continue
@@ -0,0 +1,70 @@
# From project chatglm-langchain

from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List

class ChineseTextSplitter(CharacterTextSplitter):
    def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
        super().__init__(**kwargs)
        self.pdf = pdf
        self.sentence_size = sentence_size

    def split_text1(self, text: str) -> List[str]:
        if self.pdf:
            text = re.sub(r"\n{3,}", "\n", text)
            text = re.sub('\s', ' ', text)
            text = text.replace("\n\n", "")
        sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')  # del :;
        sent_list = []
        for ele in sent_sep_pattern.split(text):
            if sent_sep_pattern.match(ele) and sent_list:
                sent_list[-1] += ele
            elif ele:
                sent_list.append(ele)
        return sent_list

    def split_text(self, text: str) -> List[str]:   # the logic here still needs further refinement
        if self.pdf:
            text = re.sub(r"\n{3,}", r"\n", text)
            text = re.sub('\s', " ", text)
            text = re.sub("\n\n", "", text)

        text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # single-character sentence terminators
        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis
        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis
        text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
        # a closing quote only ends the sentence when a terminator precedes it, so the \n goes after the quote; note that the rules above carefully preserve the quotes
        text = text.rstrip()  # drop any extra trailing \n at the end of the paragraph
        # many rule sets also handle the semicolon, but it is ignored here, as are dashes, English double quotes, and so on; adjust as needed
        ls = [i for i in text.split("\n") if i]
        for ele in ls:
            if len(ele) > self.sentence_size:
                ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
                ele1_ls = ele1.split("\n")
                for ele_ele1 in ele1_ls:
                    if len(ele_ele1) > self.sentence_size:
                        ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
                        ele2_ls = ele_ele2.split("\n")
                        for ele_ele2 in ele2_ls:
                            if len(ele_ele2) > self.sentence_size:
                                ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
                                ele2_id = ele2_ls.index(ele_ele2)
                                ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
                                    ele2_id + 1:]
                        ele_id = ele1_ls.index(ele_ele1)
                        ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]

                id = ls.index(ele)
                ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
        return ls

def load_file(filepath, sentence_size):
    loader = UnstructuredFileLoader(filepath, mode="elements")
    textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
    docs = loader.load_and_split(text_splitter=textsplitter)
    # write_check_file(filepath, docs)
    return docs
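In use, ChineseTextSplitter first turns Chinese sentence terminators into newline boundaries and only recurses when a piece exceeds sentence_size. A quick standalone call, assuming langchain is installed (the sample sentence is invented):

splitter = ChineseTextSplitter(pdf=False, sentence_size=100)
print(splitter.split_text("今天天气很好。我们去公园散步!好不好?"))
# -> ['今天天气很好。', '我们去公园散步!', '好不好?']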
@@ -0,0 +1,338 @@
# From project chatglm-langchain

import threading
from toolbox import Singleton
import os
import shutil
import os
import uuid
import tqdm
from langchain.vectorstores import FAISS
from langchain.docstore.document import Document
from typing import List, Tuple
import numpy as np
from crazy_functions.vector_fns.general_file_loader import load_file

embedding_model_dict = {
    "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
    "ernie-base": "nghuyong/ernie-3.0-base-zh",
    "text2vec-base": "shibing624/text2vec-base-chinese",
    "text2vec": "GanymedeNil/text2vec-large-chinese",
}

# Embedding model name
EMBEDDING_MODEL = "text2vec"

# Embedding running device
EMBEDDING_DEVICE = "cpu"

# context-based prompt template; be sure to keep "{question}" and "{context}"
PROMPT_TEMPLATE = """已知信息:
{context}

根据上述已知信息,简洁和专业的来回答用户的问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题” 或 “没有提供足够的相关信息”,不允许在答案中添加编造成分,答案请使用中文。 问题是:{question}"""

# sentence size used for text segmentation
SENTENCE_SIZE = 100

# length of the context attached to each matched chunk
CHUNK_SIZE = 250

# LLM input history length
LLM_HISTORY_LEN = 3

# return top-k text chunk from vector store
VECTOR_SEARCH_TOP_K = 5

# relevance score threshold for knowledge retrieval, roughly in the range 0-1100; 0 disables it. Testing suggests values below 500 give more precise matches
VECTOR_SEARCH_SCORE_THRESHOLD = 0

NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nltk_data")

FLAG_USER_NAME = uuid.uuid4().hex

# whether to enable cross-origin requests; defaults to False, set to True if needed
# is open cross domain
OPEN_CROSS_DOMAIN = False

def similarity_search_with_score_by_vector(
        self, embedding: List[float], k: int = 4
) -> List[Tuple[Document, float]]:

    def seperate_list(ls: List[int]) -> List[List[int]]:
        lists = []
        ls1 = [ls[0]]
        for i in range(1, len(ls)):
            if ls[i - 1] + 1 == ls[i]:
                ls1.append(ls[i])
            else:
                lists.append(ls1)
                ls1 = [ls[i]]
        lists.append(ls1)
        return lists

    scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
    docs = []
    id_set = set()
    store_len = len(self.index_to_docstore_id)
    for j, i in enumerate(indices[0]):
        if i == -1 or 0 < self.score_threshold < scores[0][j]:
            # This happens when not enough docs are returned.
            continue
        _id = self.index_to_docstore_id[i]
        doc = self.docstore.search(_id)
        if not self.chunk_conent:
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            doc.metadata["score"] = int(scores[0][j])
            docs.append(doc)
            continue
        id_set.add(i)
        docs_len = len(doc.page_content)
        for k in range(1, max(i, store_len - i)):
            break_flag = False
            for l in [i + k, i - k]:
                if 0 <= l < len(self.index_to_docstore_id):
                    _id0 = self.index_to_docstore_id[l]
                    doc0 = self.docstore.search(_id0)
                    if docs_len + len(doc0.page_content) > self.chunk_size:
                        break_flag = True
                        break
                    elif doc0.metadata["source"] == doc.metadata["source"]:
                        docs_len += len(doc0.page_content)
                        id_set.add(l)
            if break_flag:
                break
    if not self.chunk_conent:
        return docs
    if len(id_set) == 0 and self.score_threshold > 0:
        return []
    id_list = sorted(list(id_set))
    id_lists = seperate_list(id_list)
    for id_seq in id_lists:
        for id in id_seq:
            if id == id_seq[0]:
                _id = self.index_to_docstore_id[id]
                doc = self.docstore.search(_id)
            else:
                _id0 = self.index_to_docstore_id[id]
                doc0 = self.docstore.search(_id0)
                doc.page_content += " " + doc0.page_content
        if not isinstance(doc, Document):
            raise ValueError(f"Could not find document for id {_id}, got {doc}")
        doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
        doc.metadata["score"] = int(doc_score)
        docs.append(doc)
    return docs


class LocalDocQA:
    llm: object = None
    embeddings: object = None
    top_k: int = VECTOR_SEARCH_TOP_K
    chunk_size: int = CHUNK_SIZE
    chunk_conent: bool = True
    score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD

    def init_cfg(self,
                 top_k=VECTOR_SEARCH_TOP_K,
                 ):

        self.llm = None
        self.top_k = top_k

    def init_knowledge_vector_store(self,
                                    filepath,
                                    vs_path: str or os.PathLike = None,
                                    sentence_size=SENTENCE_SIZE,
                                    text2vec=None):
        loaded_files = []
        failed_files = []
        if isinstance(filepath, str):
            if not os.path.exists(filepath):
                print("路径不存在")
                return None
            elif os.path.isfile(filepath):
                file = os.path.split(filepath)[-1]
                try:
                    docs = load_file(filepath, SENTENCE_SIZE)
                    print(f"{file} 已成功加载")
                    loaded_files.append(filepath)
                except Exception as e:
                    print(e)
                    print(f"{file} 未能成功加载")
                    return None
            elif os.path.isdir(filepath):
                docs = []
                for file in tqdm(os.listdir(filepath), desc="加载文件"):
                    fullfilepath = os.path.join(filepath, file)
                    try:
                        docs += load_file(fullfilepath, SENTENCE_SIZE)
                        loaded_files.append(fullfilepath)
                    except Exception as e:
                        print(e)
                        failed_files.append(file)

                if len(failed_files) > 0:
                    print("以下文件未能成功加载:")
                    for file in failed_files:
                        print(f"{file}\n")

        else:
            docs = []
            for file in filepath:
                docs += load_file(file, SENTENCE_SIZE)
                print(f"{file} 已成功加载")
                loaded_files.append(file)

        if len(docs) > 0:
            print("文件加载完毕,正在生成向量库")
            if vs_path and os.path.isdir(vs_path):
                try:
                    self.vector_store = FAISS.load_local(vs_path, text2vec)
                    self.vector_store.add_documents(docs)
                except:
                    self.vector_store = FAISS.from_documents(docs, text2vec)
            else:
                self.vector_store = FAISS.from_documents(docs, text2vec)  # docs is a list of Document

            self.vector_store.save_local(vs_path)
            return vs_path, loaded_files
        else:
            raise RuntimeError("文件加载失败,请检查文件格式是否正确")

    def get_loaded_file(self, vs_path):
        ds = self.vector_store.docstore
        return set([ds._dict[k].metadata['source'].split(vs_path)[-1] for k in ds._dict])


    # query: the query text
    # vs_path: path of the knowledge base
    # chunk_conent: whether to enable context linking
    # score_threshold: score threshold for search matching
    # vector_search_top_k: number of knowledge-base entries to retrieve, 5 by default
    # chunk_sizes: length of the context linked to each matched chunk
    def get_knowledge_based_conent_test(self, query, vs_path, chunk_conent,
                                        score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
                                        vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_size=CHUNK_SIZE,
                                        text2vec=None):
        self.vector_store = FAISS.load_local(vs_path, text2vec)
        self.vector_store.chunk_conent = chunk_conent
        self.vector_store.score_threshold = score_threshold
        self.vector_store.chunk_size = chunk_size

        embedding = self.vector_store.embedding_function.embed_query(query)
        related_docs_with_score = similarity_search_with_score_by_vector(self.vector_store, embedding, k=vector_search_top_k)

        if not related_docs_with_score:
            response = {"query": query,
                        "source_documents": []}
            return response, ""
        # prompt = f"{query}. You should answer this question using information from following documents: \n\n"
        prompt = f"{query}. 你必须利用以下文档中包含的信息回答这个问题: \n\n---\n\n"
        prompt += "\n\n".join([f"({k}): " + doc.page_content for k, doc in enumerate(related_docs_with_score)])
        prompt += "\n\n---\n\n"
        prompt = prompt.encode('utf-8', 'ignore').decode()   # avoid reading non-utf8 chars
        # print(prompt)
        response = {"query": query, "source_documents": related_docs_with_score}
        return response, prompt


def construct_vector_store(vs_id, vs_path, files, sentence_size, history, one_conent, one_content_segmentation, text2vec):
    for file in files:
        assert os.path.exists(file), "输入文件不存在:" + file
    import nltk
    if NLTK_DATA_PATH not in nltk.data.path: nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path
    local_doc_qa = LocalDocQA()
    local_doc_qa.init_cfg()
    filelist = []
    if not os.path.exists(os.path.join(vs_path, vs_id)):
        os.makedirs(os.path.join(vs_path, vs_id))
    for file in files:
        file_name = file.name if not isinstance(file, str) else file
        filename = os.path.split(file_name)[-1]
        shutil.copyfile(file_name, os.path.join(vs_path, vs_id, filename))
        filelist.append(os.path.join(vs_path, vs_id, filename))
    vs_path, loaded_files = local_doc_qa.init_knowledge_vector_store(filelist, os.path.join(vs_path, vs_id), sentence_size, text2vec)

    if len(loaded_files):
        file_status = f"已添加 {'、'.join([os.path.split(i)[-1] for i in loaded_files if i])} 内容至知识库,并已加载知识库,请开始提问"
    else:
        pass
        # file_status = "文件未成功加载,请重新上传文件"
    # print(file_status)
    return local_doc_qa, vs_path

@Singleton
class knowledge_archive_interface():
    def __init__(self) -> None:
        self.threadLock = threading.Lock()
        self.current_id = ""
        self.kai_path = None
        self.qa_handle = None
        self.text2vec_large_chinese = None

    def get_chinese_text2vec(self):
        if self.text2vec_large_chinese is None:
            # < -------------------- warm up the text2vec module --------------- >
            from toolbox import ProxyNetworkActivate
            print('Checking Text2vec ...')
            from langchain.embeddings.huggingface import HuggingFaceEmbeddings
            with ProxyNetworkActivate('Download_LLM'):    # temporarily activate the proxy network
                self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")

        return self.text2vec_large_chinese


    def feed_archive(self, file_manifest, vs_path, id="default"):
        self.threadLock.acquire()
        # import uuid
        self.current_id = id
        self.qa_handle, self.kai_path = construct_vector_store(
            vs_id=self.current_id,
            vs_path=vs_path,
            files=file_manifest,
            sentence_size=100,
            history=[],
            one_conent="",
            one_content_segmentation="",
            text2vec = self.get_chinese_text2vec(),
        )
        self.threadLock.release()

    def get_current_archive_id(self):
        return self.current_id

    def get_loaded_file(self, vs_path):
        return self.qa_handle.get_loaded_file(vs_path)

    def answer_with_archive_by_id(self, txt, id, vs_path):
        self.threadLock.acquire()
        if not self.current_id == id:
            self.current_id = id
            self.qa_handle, self.kai_path = construct_vector_store(
                vs_id=self.current_id,
                vs_path=vs_path,
                files=[],
                sentence_size=100,
                history=[],
                one_conent="",
                one_content_segmentation="",
                text2vec = self.get_chinese_text2vec(),
            )
        VECTOR_SEARCH_SCORE_THRESHOLD = 0
        VECTOR_SEARCH_TOP_K = 4
        CHUNK_SIZE = 512
        resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
            query = txt,
            vs_path = self.kai_path,
            score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
            vector_search_top_k=VECTOR_SEARCH_TOP_K,
            chunk_conent=True,
            chunk_size=CHUNK_SIZE,
            text2vec = self.get_chinese_text2vec(),
        )
        self.threadLock.release()
        return resp, prompt
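knowledge_archive_interface is essentially a thread-locked wrapper around one retrieval flow: embed the query, pull the top-k neighbouring chunks out of FAISS, and paste them into a prompt. The same skeleton in miniature, assuming langchain and a downloaded text2vec model are available (the document chunks are placeholders):

from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

text2vec = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
store = FAISS.from_texts(["文档片段一", "文档片段二"], text2vec)  # placeholder chunks
hits = store.similarity_search("查询内容", k=2)
prompt = "查询内容. 你必须利用以下文档中包含的信息回答这个问题: \n\n---\n\n"
prompt += "\n\n".join(f"({i}): {d.page_content}" for i, d in enumerate(hits))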
@@ -130,7 +130,7 @@ def get_name(_url_):

 @CatchException
-def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……"
     import glob
crazy_functions/互动小游戏.py (40 lines, regular file)
@@ -0,0 +1,40 @@
from toolbox import CatchException, update_ui, update_ui_lastest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing

@CatchException
def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    from crazy_functions.game_fns.game_interactive_story import MiniGame_ResumeStory
    # clear the history
    history = []
    # pick the game
    cls = MiniGame_ResumeStory
    # continue the existing game instance if one was initialized earlier; otherwise initialize a new one
    state = cls.sync_state(chatbot,
                           llm_kwargs,
                           cls,
                           plugin_name='MiniGame_ResumeStory',
                           callback_fn='crazy_functions.互动小游戏->随机小游戏',
                           lock_plugin=True
                           )
    yield from state.continue_game(prompt, chatbot, history)


@CatchException
def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    from crazy_functions.game_fns.game_ascii_art import MiniGame_ASCII_Art
    # clear the history
    history = []
    # pick the game
    cls = MiniGame_ASCII_Art
    # continue the existing game instance if one was initialized earlier; otherwise initialize a new one
    state = cls.sync_state(chatbot,
                           llm_kwargs,
                           cls,
                           plugin_name='MiniGame_ASCII_Art',
                           callback_fn='crazy_functions.互动小游戏->随机小游戏1',
                           lock_plugin=True
                           )
    yield from state.continue_game(prompt, chatbot, history)
@@ -3,7 +3,7 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

 @CatchException
-def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             text the user typed into the input field, e.g. a paragraph to translate or a path containing files to process
     llm_kwargs      gpt model parameters such as temperature and top_p; usually just passed straight through
@@ -11,7 +11,7 @@ def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     chatbot         handle of the chat display box, used to show output to the user
     history         chat history, i.e. the context so far
     system_prompt   silent reminder given to gpt
-    web_port        port the software is currently running on
+    user_request    information about the current user's request (IP address, etc.)
     """
     history = []    # clear the history to avoid input overflow
     chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))
@@ -139,7 +139,7 @@ def get_recent_file_prompt_support(chatbot):
     return path

 @CatchException
-def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             text the user typed into the input field, e.g. a paragraph to translate or a path containing files to process
     llm_kwargs      gpt model parameters such as temperature and top_p; usually just passed straight through
@@ -147,7 +147,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     chatbot         handle of the chat display box, used to show output to the user
     history         chat history, i.e. the context so far
     system_prompt   silent reminder given to gpt
-    web_port        port the software is currently running on
+    user_request    information about the current user's request (IP address, etc.)
     """

     # clear the history
@@ -4,7 +4,7 @@ from .crazy_utils import input_clipping
 import copy, json

 @CatchException
-def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             text the user typed into the input field, e.g. a paragraph to translate or a path containing files to process
     llm_kwargs      gpt model parameters such as temperature and top_p; usually just passed straight through
@@ -12,7 +12,7 @@ def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     chatbot         handle of the chat display box, used to show output to the user
     history         chat history, i.e. the context so far
     system_prompt   silent reminder given to gpt
-    web_port        port the software is currently running on
+    user_request    information about the current user's request (IP address, etc.)
     """
     # clear the history to avoid input overflow
     history = []
|
|||||||
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicState
|
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicState
|
||||||
|
|
||||||
|
|
||||||
def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", quality=None):
|
def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", quality=None, style=None):
|
||||||
import requests, json, time, os
|
import requests, json, time, os
|
||||||
from request_llms.bridge_all import model_info
|
from request_llms.bridge_all import model_info
|
||||||
|
|
||||||
@@ -25,7 +25,10 @@ def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", qual
|
|||||||
'model': model,
|
'model': model,
|
||||||
'response_format': 'url'
|
'response_format': 'url'
|
||||||
}
|
}
|
||||||
if quality is not None: data.update({'quality': quality})
|
if quality is not None:
|
||||||
|
data['quality'] = quality
|
||||||
|
if style is not None:
|
||||||
|
data['style'] = style
|
||||||
response = requests.post(url, headers=headers, json=data, proxies=proxies)
|
response = requests.post(url, headers=headers, json=data, proxies=proxies)
|
||||||
print(response.content)
|
print(response.content)
|
||||||
try:
|
try:
|
||||||
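Because quality and style are only attached when the caller sets them, dall-e-2 requests (which reject those fields) stay valid while dall-e-3 can opt in. A sketch of the payload construction (build_payload is a hypothetical helper, not part of the plugin):

def build_payload(prompt, resolution, model, quality=None, style=None):
    data = {'prompt': prompt, 'n': 1, 'size': resolution,
            'model': model, 'response_format': 'url'}
    if quality is not None:
        data['quality'] = quality
    if style is not None:
        data['style'] = style
    return data

print(build_payload("a cat", "1024x1024", "dall-e-2"))                 # no extra keys
print(build_payload("a cat", "1024x1024", "dall-e-3", "hd", "vivid"))  # opts in to both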
@@ -54,19 +57,25 @@ def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="da
     img_endpoint = chat_endpoint.replace('chat/completions','images/edits')
     # # Generate the image
     url = img_endpoint
+    n = 1
     headers = {
         'Authorization': f"Bearer {api_key}",
-        'Content-Type': 'application/json'
     }
-    data = {
-        'image': open(image_path, 'rb'),
-        'prompt': prompt,
-        'n': 1,
-        'size': resolution,
-        'model': model,
-        'response_format': 'url'
-    }
-    response = requests.post(url, headers=headers, json=data, proxies=proxies)
+    make_transparent(image_path, image_path+'.tsp.png')
+    make_square_image(image_path+'.tsp.png', image_path+'.tspsq.png')
+    resize_image(image_path+'.tspsq.png', image_path+'.ready.png', max_size=1024)
+    image_path = image_path+'.ready.png'
+    with open(image_path, 'rb') as f:
+        file_content = f.read()
+    files = {
+        'image': (os.path.basename(image_path), file_content),
+        # 'mask': ('mask.png', open('mask.png', 'rb'))
+        'prompt': (None, prompt),
+        "n": (None, str(n)),
+        'size': (None, resolution),
+    }
+
+    response = requests.post(url, headers=headers, files=files, proxies=proxies)
     print(response.content)
     try:
         image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
@@ -84,7 +93,7 @@ def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="da

 @CatchException
-def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             text the user typed into the input field, e.g. a paragraph to translate or a path containing files to process
     llm_kwargs      gpt model parameters such as temperature and top_p; usually just passed straight through
@@ -92,10 +101,14 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
     chatbot         handle of the chat display box, used to show output to the user
     history         chat history, i.e. the context so far
     system_prompt   silent reminder given to gpt
-    web_port        port the software is currently running on
+    user_request    information about the current user's request (IP address, etc.)
     """
     history = []    # clear the history to avoid input overflow
-    chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
+    if prompt.strip() == "":
+        chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
+        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
+        return
+    chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI first, since the gpt request will take a while
     if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
     resolution = plugin_kwargs.get("advanced_arg", '1024x1024')
@@ -110,18 +123,27 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
|||||||
|
|
||||||
|
|
||||||
@CatchException
|
@CatchException
|
||||||
def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
||||||
history = [] # 清空历史,以免输入溢出
|
history = [] # 清空历史,以免输入溢出
|
||||||
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
|
if prompt.strip() == "":
|
||||||
|
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
|
||||||
|
return
|
||||||
|
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
||||||
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
|
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
|
||||||
resolution = plugin_kwargs.get("advanced_arg", '1024x1024').lower()
|
resolution_arg = plugin_kwargs.get("advanced_arg", '1024x1024-standard-vivid').lower()
|
||||||
if resolution.endswith('-hd'):
|
parts = resolution_arg.split('-')
|
||||||
resolution = resolution.replace('-hd', '')
|
resolution = parts[0] # 解析分辨率
|
||||||
quality = 'hd'
|
quality = 'standard' # 质量与风格默认值
|
||||||
else:
|
style = 'vivid'
|
||||||
quality = 'standard'
|
# 遍历检查是否有额外参数
|
||||||
image_url, image_path = gen_image(llm_kwargs, prompt, resolution, model="dall-e-3", quality=quality)
|
for part in parts[1:]:
|
||||||
|
if part in ['hd', 'standard']:
|
||||||
|
quality = part
|
||||||
|
elif part in ['vivid', 'natural']:
|
||||||
|
style = part
|
||||||
|
image_url, image_path = gen_image(llm_kwargs, prompt, resolution, model="dall-e-3", quality=quality, style=style)
|
||||||
chatbot.append([prompt,
|
chatbot.append([prompt,
|
||||||
f'图像中转网址: <br/>`{image_url}`<br/>'+
|
f'图像中转网址: <br/>`{image_url}`<br/>'+
|
||||||
f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
|
f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
|
||||||
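The new `advanced_arg` format for DALLE3 packs resolution, quality and style into one dash-separated token, e.g. `1024x1792-hd-natural`. The trailing flags may come in any order, and unknown flags are silently ignored. A behavior sketch of the parsing introduced above:

```python
def parse_dalle3_arg(resolution_arg='1024x1024-standard-vivid'):
    # mirrors 图片生成_DALLE3: first token is the resolution,
    # the rest override the quality/style defaults
    parts = resolution_arg.lower().split('-')
    resolution, quality, style = parts[0], 'standard', 'vivid'
    for part in parts[1:]:
        if part in ('hd', 'standard'):
            quality = part
        elif part in ('vivid', 'natural'):
            style = part
    return resolution, quality, style

assert parse_dalle3_arg('1024x1792-hd-natural') == ('1024x1792', 'hd', 'natural')
assert parse_dalle3_arg('1024x1024') == ('1024x1024', 'standard', 'vivid')
```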
@@ -130,6 +152,7 @@ def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
         ])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新

+
 class ImageEditState(GptAcademicState):
     # 尚未完成
     def get_image_file(self, x):
@@ -142,6 +165,15 @@ class ImageEditState(GptAcademicState):
         file = None if not confirm else file_manifest[0]
         return confirm, file

+    def lock_plugin(self, chatbot):
+        chatbot._cookies['lock_plugin'] = 'crazy_functions.图片生成->图片修改_DALLE2'
+        self.dump_state(chatbot)
+
+    def unlock_plugin(self, chatbot):
+        self.reset()
+        chatbot._cookies['lock_plugin'] = None
+        self.dump_state(chatbot)
+
     def get_resolution(self, x):
         return (x in ['256x256', '512x512', '1024x1024']), x

@@ -151,9 +183,9 @@ class ImageEditState(GptAcademicState):

     def reset(self):
         self.req = [
             {'value':None, 'description': '请先上传图像(必须是.png格式), 然后再次点击本插件', 'verify_fn': self.get_image_file},
-            {'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024', 'verify_fn': self.get_resolution},
-            {'value':None, 'description': '请输入修改需求,建议您使用英文提示词', 'verify_fn': self.get_prompt},
+            {'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024, 然后再次点击本插件', 'verify_fn': self.get_resolution},
+            {'value':None, 'description': '请输入修改需求,建议您使用英文提示词, 然后再次点击本插件', 'verify_fn': self.get_prompt},
         ]
         self.info = ""

@@ -163,7 +195,7 @@ class ImageEditState(GptAcademicState):
             confirm, res = r['verify_fn'](prompt)
             if confirm:
                 r['value'] = res
-                self.set_state(chatbot, 'dummy_key', 'dummy_value')
+                self.dump_state(chatbot)
                 break
         return self

@@ -177,28 +209,68 @@ class ImageEditState(GptAcademicState):
         return all([x['value'] is not None for x in self.req])

 @CatchException
-def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # 尚未完成
     history = []    # 清空历史
     state = ImageEditState.get_state(chatbot, ImageEditState)
     state = state.feed(prompt, chatbot)
+    state.lock_plugin(chatbot)
     if not state.already_obtained_all_materials():
-        chatbot.append(["图片修改(先上传图片,再输入修改需求,最后输入分辨率)", state.next_req()])
+        chatbot.append(["图片修改\n\n1. 上传图片(图片中需要修改的位置用橡皮擦擦除为纯白色,即RGB=255,255,255)\n2. 输入分辨率 \n3. 输入修改需求", state.next_req()])
         yield from update_ui(chatbot=chatbot, history=history)
         return

-    image_path = state.req[0]
-    resolution = state.req[1]
-    prompt = state.req[2]
+    image_path = state.req[0]['value']
+    resolution = state.req[1]['value']
+    prompt = state.req[2]['value']
     chatbot.append(["图片修改, 执行中", f"图片:`{image_path}`<br/>分辨率:`{resolution}`<br/>修改需求:`{prompt}`"])
     yield from update_ui(chatbot=chatbot, history=history)

     image_url, image_path = edit_image(llm_kwargs, prompt, image_path, resolution)
-    chatbot.append([state.prompt,
+    chatbot.append([prompt,
         f'图像中转网址: <br/>`{image_url}`<br/>'+
         f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
         f'本地文件地址: <br/>`{image_path}`<br/>'+
         f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
     ])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
+    state.unlock_plugin(chatbot)
+
+def make_transparent(input_image_path, output_image_path):
+    from PIL import Image
+    image = Image.open(input_image_path)
+    image = image.convert("RGBA")
+    data = image.getdata()
+    new_data = []
+    for item in data:
+        if item[0] == 255 and item[1] == 255 and item[2] == 255:
+            new_data.append((255, 255, 255, 0))
+        else:
+            new_data.append(item)
+    image.putdata(new_data)
+    image.save(output_image_path, "PNG")
+
+def resize_image(input_path, output_path, max_size=1024):
+    from PIL import Image
+    with Image.open(input_path) as img:
+        width, height = img.size
+        if width > max_size or height > max_size:
+            if width >= height:
+                new_width = max_size
+                new_height = int((max_size / width) * height)
+            else:
+                new_height = max_size
+                new_width = int((max_size / height) * width)
+
+            resized_img = img.resize(size=(new_width, new_height))
+            resized_img.save(output_path)
+        else:
+            img.save(output_path)
+
+def make_square_image(input_path, output_path):
+    from PIL import Image
+    with Image.open(input_path) as img:
+        width, height = img.size
+        size = max(width, height)
+        new_img = Image.new("RGBA", (size, size), color="black")
+        new_img.paste(img, ((size - width) // 2, (size - height) // 2))
+        new_img.save(output_path)
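The three helpers appended above form the preprocessing chain that `edit_image` now runs before upload: `make_transparent` turns pure-white pixels (RGB 255,255,255, the color the plugin tells users to erase the editable region to) into fully transparent ones, `make_square_image` pads the picture onto a square canvas, and `resize_image` caps the longer side at 1024px while keeping the aspect ratio. A usage sketch of the chain, assuming a hypothetical `input.png` on disk; the `.tsp`/`.tspsq`/`.ready` suffixes match the intermediate artifacts wired into `edit_image`:

```python
src = 'input.png'  # hypothetical input file
make_transparent(src, src + '.tsp.png')                  # white -> alpha 0
make_square_image(src + '.tsp.png', src + '.tspsq.png')  # pad to square canvas
resize_image(src + '.tspsq.png', src + '.ready.png', max_size=1024)
```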
======== next file ========
@@ -21,7 +21,7 @@ def remove_model_prefix(llm):


 @CatchException
-def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -29,7 +29,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     # 检查当前的模型是否符合要求
     supported_llms = [
@@ -51,13 +51,6 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # 如果不是本地模型,加载API_KEY
         llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])

-    # 检查当前的模型是否符合要求
-    API_URL_REDIRECT = get_conf('API_URL_REDIRECT')
-    if len(API_URL_REDIRECT) > 0:
-        chatbot.append([f"处理任务: {txt}", f"暂不支持中转."])
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-
     # 尝试导入依赖,如果缺少依赖,则给出安装建议
     try:
         import autogen
@@ -96,7 +89,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
         history = []
         chatbot.append(["正在启动: 多智能体终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
+        executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
         persistent_class_multi_user_manager.set(persistent_key, executor)
         exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="create")

======== next file ========
@@ -69,7 +69,7 @@ def read_file_to_chat(chatbot, history, file_name):
     return chatbot, history

 @CatchException
-def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -77,7 +77,7 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """

     chatbot.append(("保存当前对话",
@@ -91,7 +91,7 @@ def hide_cwd(str):
     return str.replace(current_path, replace_path)

 @CatchException
-def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -99,7 +99,7 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     from .crazy_utils import get_files_from_everything
     success, file_manifest, _ = get_files_from_everything(txt, type='.html')
@@ -126,7 +126,7 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         return

 @CatchException
-def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -134,7 +134,7 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """

     import glob, os
======== next file ========
@@ -29,17 +29,12 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
         except:
             raise RuntimeError('请先将.doc文档转换为.docx文档。')

-        print(file_content)
         # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名
-        from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
+        from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
         from request_llms.bridge_all import model_info
         max_token = model_info[llm_kwargs['llm_model']]['max_token']
         TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4
-        paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
-            txt=file_content,
-            get_token_fn=model_info[llm_kwargs['llm_model']]['token_cnt'],
-            limit=TOKEN_LIMIT_PER_FRAGMENT
-        )
+        paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
         this_paper_history = []
         for i, paper_frag in enumerate(paper_fragments):
             i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```'
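This is the first of several call sites migrated from `breakdown_txt_to_satisfy_token_limit_for_pdf` to `breakdown_text_to_satisfy_token_limit`; the same mechanical change recurs in the PDF, Markdown and notebook plugins below. The old helper made every caller fetch a tokenizer and pass a counting callback; the new one takes the model name and resolves the tokenizer itself. A before/after sketch (the model name and `text` are placeholders):

```python
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit

text = "..." * 1000  # placeholder document text
# old style (removed): caller builds the token counter itself
#   enc = model_info["gpt-3.5-turbo"]['tokenizer']
#   def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
#   fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
#       txt=text, get_token_fn=get_token_num, limit=2500)
# new style: the helper looks the tokenizer up from the model name
fragments = breakdown_text_to_satisfy_token_limit(txt=text, limit=2500, llm_model="gpt-3.5-turbo")
```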
@@ -84,7 +79,7 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot


 @CatchException
-def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     import glob, os

     # 基本信息:功能、贡献者
======== next file ========
@@ -28,8 +28,8 @@ class PaperFileGroup():
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index])
             else:
-                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
+                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                 for j, segment in enumerate(segments):
                     self.sp_file_contents.append(segment)
                     self.sp_file_index.append(index)
@@ -153,7 +153,7 @@ def get_files_from_everything(txt, preference=''):


 @CatchException
-def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
@@ -193,7 +193,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p


 @CatchException
-def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
@@ -226,7 +226,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p


 @CatchException
-def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
======== next file ========
@@ -20,14 +20,9 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,

         TOKEN_LIMIT_PER_FRAGMENT = 2500

-        from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-        from request_llms.bridge_all import model_info
-        enc = model_info["gpt-3.5-turbo"]['tokenizer']
-        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
-        paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
-            txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
-        page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
-            txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
+        from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+        paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
+        page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])
         # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
         paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]

@@ -106,7 +101,7 @@ do not have too much repetitive information, numerical values using the original


 @CatchException
-def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     import glob, os

     # 基本信息:功能、贡献者
======== next file ========
@@ -124,7 +124,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo


 @CatchException
-def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os

======== next file ========
@@ -48,7 +48,7 @@ def markdown_to_dict(article_content):


 @CatchException
-def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):

     disable_auto_promotion(chatbot)
     # 基本信息:功能、贡献者
======== next file ========
@@ -10,7 +10,7 @@ import os


 @CatchException
-def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):

     disable_auto_promotion(chatbot)
     # 基本信息:功能、贡献者
@@ -91,14 +91,9 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
     page_one = str(page_one).encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars

     # 递归地切割PDF文件
-    from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-    from request_llms.bridge_all import model_info
-    enc = model_info["gpt-3.5-turbo"]['tokenizer']
-    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
-    paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
-        txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
-    page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
-        txt=page_one, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
+    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+    paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
+    page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=page_one, limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])

     # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
     paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
======== next file ========
@@ -1,6 +1,7 @@
-from toolbox import CatchException, update_ui, gen_time_str
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from .crazy_utils import input_clipping
+import os
+from toolbox import CatchException, update_ui, gen_time_str, promote_file_to_downloadzone
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import input_clipping

 def inspect_dependency(chatbot, history):
     # 尝试导入依赖,如果缺少依赖,则给出安装建议
@@ -27,9 +28,10 @@ def eval_manim(code):
    class_name = get_class_name(code)

    try:
+        time_str = gen_time_str()
         subprocess.check_output([sys.executable, '-c', f"from gpt_log.MyAnimation import {class_name}; {class_name}().render()"])
-        shutil.move('media/videos/1080p60/{class_name}.mp4', f'gpt_log/{class_name}-{gen_time_str()}.mp4')
-        return f'gpt_log/{gen_time_str()}.mp4'
+        shutil.move(f'media/videos/1080p60/{class_name}.mp4', f'gpt_log/{class_name}-{time_str}.mp4')
+        return f'gpt_log/{time_str}.mp4'
     except subprocess.CalledProcessError as e:
         output = e.output.decode()
         print(f"Command returned non-zero exit status {e.returncode}: {output}.")
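Two small but real fixes in `eval_manim` above: the old `shutil.move` source string lacked the `f` prefix, so it looked for a file literally named `media/videos/1080p60/{class_name}.mp4`; and `gen_time_str()` was called twice, so the moved-to path and the returned path could disagree whenever the timestamp ticked between the two calls. Caching the value once in `time_str` removes that race. A minimal illustration of the f-prefix bug:

```python
class_name = 'MyAnimation'
print('media/videos/1080p60/{class_name}.mp4')   # literal braces: the old bug
print(f'media/videos/1080p60/{class_name}.mp4')  # interpolated: the fix
```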
@@ -48,7 +50,7 @@ def get_code_block(reply):
     return matches[0].strip('python') # code block

 @CatchException
-def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -56,7 +58,7 @@ def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     # 清空历史,以免输入溢出
     history = []
@@ -94,6 +96,8 @@ def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
         res = eval_manim(code)

         chatbot.append(("生成的视频文件路径", res))
+        if os.path.exists(res):
+            promote_file_to_downloadzone(res, chatbot=chatbot)
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新

 # 在这里放一些网上搜集的demo,辅助gpt生成代码
======== next file ========
@@ -18,14 +18,9 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro

     TOKEN_LIMIT_PER_FRAGMENT = 2500

-    from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-    from request_llms.bridge_all import model_info
-    enc = model_info["gpt-3.5-turbo"]['tokenizer']
-    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
-    paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
-        txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
-    page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
-        txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
+    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+    paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
+    page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])
     # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
     paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]

@@ -45,7 +40,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     for i in range(n_fragment):
         NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
         i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}"
-        i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]}"
+        i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]} ...."
         gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
                                                                            llm_kwargs, chatbot,
                                                                            history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果
@@ -68,7 +63,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro


 @CatchException
-def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     import glob, os

     # 基本信息:功能、贡献者
======== next file ========
@@ -36,7 +36,7 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs,


 @CatchException
-def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
======== next file ========
crazy_functions/生成多种Mermaid图表.py (302 lines, new file)
@@ -0,0 +1,302 @@
+from toolbox import CatchException, update_ui, report_exception
+from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from .crazy_utils import read_and_clean_pdf_text
+import datetime
+
+#以下是每类图表的PROMPT
+SELECT_PROMPT = """
+“{subject}”
+=============
+以上是从文章中提取的摘要,将会使用这些摘要绘制图表。请你选择一个合适的图表类型:
+1 流程图
+2 序列图
+3 类图
+4 饼图
+5 甘特图
+6 状态图
+7 实体关系图
+8 象限提示图
+不需要解释原因,仅需要输出单个不带任何标点符号的数字。
+"""
+#没有思维导图!!!测试发现模型始终会优先选择思维导图
+#流程图
+PROMPT_1 = """
+请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例:
+```mermaid
+graph TD
+    P(编程) --> L1(Python)
+    P(编程) --> L2(C)
+    P(编程) --> L3(C++)
+    P(编程) --> L4(Javascipt)
+    P(编程) --> L5(PHP)
+```
+"""
+#序列图
+PROMPT_2 = """
+请你给出围绕“{subject}”的序列图,使用mermaid语法,mermaid语法举例:
+```mermaid
+sequenceDiagram
+    participant A as 用户
+    participant B as 系统
+    A->>B: 登录请求
+    B->>A: 登录成功
+    A->>B: 获取数据
+    B->>A: 返回数据
+```
+"""
+#类图
+PROMPT_3 = """
+请你给出围绕“{subject}”的类图,使用mermaid语法,mermaid语法举例:
+```mermaid
+classDiagram
+    Class01 <|-- AveryLongClass : Cool
+    Class03 *-- Class04
+    Class05 o-- Class06
+    Class07 .. Class08
+    Class09 --> C2 : Where am i?
+    Class09 --* C3
+    Class09 --|> Class07
+    Class07 : equals()
+    Class07 : Object[] elementData
+    Class01 : size()
+    Class01 : int chimp
+    Class01 : int gorilla
+    Class08 <--> C2: Cool label
+```
+"""
+#饼图
+PROMPT_4 = """
+请你给出围绕“{subject}”的饼图,使用mermaid语法,mermaid语法举例:
+```mermaid
+pie title Pets adopted by volunteers
+    "狗" : 386
+    "猫" : 85
+    "兔子" : 15
+```
+"""
+#甘特图
+PROMPT_5 = """
+请你给出围绕“{subject}”的甘特图,使用mermaid语法,mermaid语法举例:
+```mermaid
+gantt
+    title 项目开发流程
+    dateFormat  YYYY-MM-DD
+    section 设计
+    需求分析 :done, des1, 2024-01-06,2024-01-08
+    原型设计 :active, des2, 2024-01-09, 3d
+    UI设计 : des3, after des2, 5d
+    section 开发
+    前端开发 :2024-01-20, 10d
+    后端开发 :2024-01-20, 10d
+```
+"""
+#状态图
+PROMPT_6 = """
+请你给出围绕“{subject}”的状态图,使用mermaid语法,mermaid语法举例:
+```mermaid
+stateDiagram-v2
+    [*] --> Still
+    Still --> [*]
+    Still --> Moving
+    Moving --> Still
+    Moving --> Crash
+    Crash --> [*]
+```
+"""
+#实体关系图
+PROMPT_7 = """
+请你给出围绕“{subject}”的实体关系图,使用mermaid语法,mermaid语法举例:
+```mermaid
+erDiagram
+    CUSTOMER ||--o{ ORDER : places
+    ORDER ||--|{ LINE-ITEM : contains
+    CUSTOMER {
+        string name
+        string id
+    }
+    ORDER {
+        string orderNumber
+        date orderDate
+        string customerID
+    }
+    LINE-ITEM {
+        number quantity
+        string productID
+    }
+```
+"""
+#象限提示图
+PROMPT_8 = """
+请你给出围绕“{subject}”的象限图,使用mermaid语法,mermaid语法举例:
+```mermaid
+graph LR
+    A[Hard skill] --> B(Programming)
+    A[Hard skill] --> C(Design)
+    D[Soft skill] --> E(Coordination)
+    D[Soft skill] --> F(Communication)
+```
+"""
+#思维导图
+PROMPT_9 = """
+{subject}
+==========
+请给出上方内容的思维导图,充分考虑其之间的逻辑,使用mermaid语法,mermaid语法举例:
+```mermaid
+mindmap
+  root((mindmap))
+    Origins
+      Long history
+      ::icon(fa fa-book)
+      Popularisation
+        British popular psychology author Tony Buzan
+    Research
+      On effectiveness<br/>and features
+      On Automatic creation
+        Uses
+            Creative techniques
+            Strategic planning
+            Argument mapping
+    Tools
+      Pen and paper
+      Mermaid
+```
+"""
+
+def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
+    ############################## <第 0 步,切割输入> ##################################
+    # 借用PDF切割中的函数对文本进行切割
+    TOKEN_LIMIT_PER_FRAGMENT = 2500
+    txt = str(history).encode('utf-8', 'ignore').decode()   # avoid reading non-utf8 chars
+    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+    txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
+    ############################## <第 1 步,迭代地历遍整个文章,提取精炼信息> ##################################
+    i_say_show_user = f'首先你从历史记录或文件中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示
+    chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI
+    results = []
+    MAX_WORD_TOTAL = 4096
+    n_txt = len(txt)
+    last_iteration_result = "从以下文本中提取摘要。"
+    if n_txt >= 20: print('文章极长,不能达到预期效果')
+    for i in range(n_txt):
+        NUM_OF_WORD = MAX_WORD_TOTAL // n_txt
+        i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i]}"
+        i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...."
+        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user,  # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
+            llm_kwargs, chatbot,
+            history=["The main content of the previous section is?", last_iteration_result], # 迭代上一次的结果
+            sys_prompt="Extracts the main content from the text section where it is located for graphing purposes, answer me with Chinese."  # 提示
+        )
+        results.append(gpt_say)
+        last_iteration_result = gpt_say
+    ############################## <第 2 步,根据整理的摘要选择图表类型> ##################################
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    gpt_say = plugin_kwargs.get("advanced_arg", "")     #将图表类型参数赋值为插件参数
+    results_txt = '\n'.join(results)    #合并摘要
+    if gpt_say not in ['1','2','3','4','5','6','7','8','9']:    #如插件参数不正确则使用对话模型判断
+        i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。" # 用户提示
+        chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI
+        i_say = SELECT_PROMPT.format(subject=results_txt)
+        i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图。由于不管提供文本是什么,模型大概率认为"思维导图"最合适,因此思维导图仅能通过参数调用。'
+        for i in range(3):
+            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs=i_say,
+                inputs_show_user=i_say_show_user,
+                llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
+                sys_prompt=""
+            )
+            if gpt_say in ['1','2','3','4','5','6','7','8','9']:    #判断返回是否正确
+                break
+        if gpt_say not in ['1','2','3','4','5','6','7','8','9']:
+            gpt_say = '1'
+    ############################## <第 3 步,根据选择的图表类型绘制图表> ##################################
+    if gpt_say == '1':
+        i_say = PROMPT_1.format(subject=results_txt)
+    elif gpt_say == '2':
+        i_say = PROMPT_2.format(subject=results_txt)
+    elif gpt_say == '3':
+        i_say = PROMPT_3.format(subject=results_txt)
+    elif gpt_say == '4':
+        i_say = PROMPT_4.format(subject=results_txt)
+    elif gpt_say == '5':
+        i_say = PROMPT_5.format(subject=results_txt)
+    elif gpt_say == '6':
+        i_say = PROMPT_6.format(subject=results_txt)
+    elif gpt_say == '7':
+        i_say = PROMPT_7.replace("{subject}", results_txt)  #由于实体关系图用到了{}符号
+    elif gpt_say == '8':
+        i_say = PROMPT_8.format(subject=results_txt)
+    elif gpt_say == '9':
+        i_say = PROMPT_9.format(subject=results_txt)
+    i_say_show_user = f'请根据判断结果绘制相应的图表。如需绘制思维导图请使用参数调用,同时过大的图表可能需要复制到在线编辑器中进行渲染。'
+    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+        inputs=i_say,
+        inputs_show_user=i_say_show_user,
+        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
+        sys_prompt="你精通使用mermaid语法来绘制图表,首先确保语法正确,其次避免在mermaid语法中使用不允许的字符,此外也应当分考虑图表的可读性。"
+    )
+    history.append(gpt_say)
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+
+def 输入区文件处理(txt):
+    if txt == "": return False, txt
+    success = True
+    import glob
+    from .crazy_utils import get_files_from_everything
+    file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf')
+    file_md,md_manifest,folder_md = get_files_from_everything(txt, '.md')
+    if len(pdf_manifest) == 0 and len(md_manifest) == 0:
+        return False, txt   #如输入区内容不是文件则直接返回输入区内容
+
+    final_result = ""
+    if file_pdf:
+        for index, fp in enumerate(pdf_manifest):
+            file_content, page_one = read_and_clean_pdf_text(fp) # (尝试)按照章节切割PDF
+            file_content = file_content.encode('utf-8', 'ignore').decode()   # avoid reading non-utf8 chars
+            final_result += "\n" + file_content
+    if file_md:
+        for index, fp in enumerate(md_manifest):
+            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
+                file_content = f.read()
+            file_content = file_content.encode('utf-8', 'ignore').decode()
+            final_result += "\n" + file_content
+    return True, final_result
+
+@CatchException
+def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    """
+    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
+    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
+    plugin_kwargs   插件模型的参数,用于灵活调整复杂功能的各种参数
+    chatbot         聊天显示框的句柄,用于显示给用户
+    history         聊天历史,前情提要
+    system_prompt   给gpt的静默提醒
+    web_port        当前软件运行的端口号
+    """
+    import os
+
+    # 基本信息:功能、贡献者
+    chatbot.append([
+        "函数插件功能?",
+        "根据当前聊天历史或文件中(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\
+        \n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"])
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+    # 尝试导入依赖,如果缺少依赖,则给出安装建议
+    try:
+        import fitz
+    except:
+        report_exception(chatbot, history,
+            a = f"解析项目: {txt}",
+            b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+        return
+
+    if os.path.exists(txt):     #如输入区无内容则直接解析历史记录
+        file_exist, txt = 输入区文件处理(txt)
+    else:
+        file_exist = False
+
+    if file_exist : history = []    #如输入区内容为文件则清空历史记录
+    history.append(txt)     #将解析后的txt传递加入到历史中
+
+    yield from 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs)
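One detail worth noting in the dispatch above: every template is filled with `str.format(subject=...)` except `PROMPT_7`, which uses `str.replace`, because the entity-relationship example contains literal `{ ... }` blocks that `format` would try to parse as replacement fields, as the inline comment notes. A small sketch of why:

```python
template = 'erDiagram\n    CUSTOMER {\n        string name\n    }\n围绕“{subject}”'
try:
    template.format(subject='demo')  # the literal braces are parsed as a field name
except KeyError as e:
    print('format() fails:', e)
print(template.replace('{subject}', 'demo'))  # only the exact token is swapped
```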
======== next file ========

@@ -1,10 +1,19 @@
-from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg
+from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg, get_log_folder, get_user
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything

+install_msg ="""
+
+1. python -m pip install torch --index-url https://download.pytorch.org/whl/cpu
+
+2. python -m pip install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
+
+3. python -m pip install unstructured[all-docs] --upgrade
+
+4. python -c 'import nltk; nltk.download("punkt")'
+"""
+
 @CatchException
-def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
@@ -12,7 +21,7 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     history = []    # 清空历史,以免输入溢出

@@ -25,15 +34,15 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro

     # resolve deps
     try:
-        from zh_langchain import construct_vector_store
-        from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-        from .crazy_utils import knowledge_archive_interface
+        # from zh_langchain import construct_vector_store
+        # from langchain.embeddings.huggingface import HuggingFaceEmbeddings
+        from crazy_functions.vector_fns.vector_database import knowledge_archive_interface
     except Exception as e:
-        chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
+        chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        from .crazy_utils import try_install_deps
-        try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
-        yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
+        # from .crazy_utils import try_install_deps
+        # try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
+        # yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
         return

     # < --------------------读取文件--------------- >
@@ -62,30 +71,31 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     print('Establishing knowledge archive ...')
     with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
         kai = knowledge_archive_interface()
-        kai.feed_archive(file_manifest=file_manifest, id=kai_id)
-        kai_files = kai.get_loaded_file()
+        vs_path = get_log_folder(user=get_user(chatbot), plugin_name='vec_store')
+        kai.feed_archive(file_manifest=file_manifest, vs_path=vs_path, id=kai_id)
+        kai_files = kai.get_loaded_file(vs_path=vs_path)
         kai_files = '<br/>'.join(kai_files)
     # chatbot.append(['知识库构建成功', "正在将知识库存储至cookie中"])
     # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
     # chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id()
-    # chatbot._cookies['lock_plugin'] = 'crazy_functions.Langchain知识库->读取知识库作答'
+    # chatbot._cookies['lock_plugin'] = 'crazy_functions.知识库文件注入->读取知识库作答'
     # chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"])
     chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新

 @CatchException
-def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
+def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request=-1):
     # resolve deps
     try:
-        from zh_langchain import construct_vector_store
-        from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-        from .crazy_utils import knowledge_archive_interface
+        # from zh_langchain import construct_vector_store
+        # from langchain.embeddings.huggingface import HuggingFaceEmbeddings
+        from crazy_functions.vector_fns.vector_database import knowledge_archive_interface
     except Exception as e:
-        chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
+        chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        from .crazy_utils import try_install_deps
-        try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
-        yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
+        # from .crazy_utils import try_install_deps
+        # try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
+        # yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
         return

     # < ------------------- --------------- >
@@ -93,7 +103,8 @@ def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst

     if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
     kai_id = plugin_kwargs.get("advanced_arg", 'default')
-    resp, prompt = kai.answer_with_archive_by_id(txt, kai_id)
+    vs_path = get_log_folder(user=get_user(chatbot), plugin_name='vec_store')
+    resp, prompt = kai.answer_with_archive_by_id(txt, kai_id, vs_path)

     chatbot.append((txt, f'[知识库 {kai_id}] ' + prompt))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
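The structural change in this file: the vector store no longer lives in one shared archive but under a per-user log folder, and `feed_archive` and `answer_with_archive_by_id` must agree on that `vs_path`. A sketch of the convention, assuming the `toolbox` helpers behave as their names suggest:

```python
from toolbox import get_log_folder, get_user

def user_vector_store_path(chatbot):
    # one isolated 'vec_store' directory per logged-in user
    return get_log_folder(user=get_user(chatbot), plugin_name='vec_store')
```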
======== next file ========

@@ -55,7 +55,7 @@ def scrape_text(url, proxies) -> str:
     return text

 @CatchException
-def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -63,7 +63,7 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     history = []    # 清空历史,以免输入溢出
     chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
======== next file ========
@@ -55,7 +55,7 @@ def scrape_text(url, proxies) -> str:
     return text

 @CatchException
-def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -63,7 +63,7 @@ def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, histor
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     history = []    # 清空历史,以免输入溢出
     chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
======== next file ========
@@ -104,7 +104,7 @@ def analyze_intention_with_simple_rules(txt):


 @CatchException
-def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     disable_auto_promotion(chatbot=chatbot)
     # 获取当前虚空终端状态
     state = VoidTerminalState.get_state(chatbot)
@@ -121,7 +121,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
             state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
             state.unlock_plugin(chatbot=chatbot)
             yield from update_ui(chatbot=chatbot, history=history)
-            yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
+            yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
             return
         else:
             # 如果意图模糊,提示
@@ -133,7 +133,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt



-def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []
     chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}"))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
======== next file ========
@@ -12,13 +12,6 @@ class PaperFileGroup():
         self.sp_file_index = []
         self.sp_file_tag = []

-        # count_token
-        from request_llms.bridge_all import model_info
-        enc = model_info["gpt-3.5-turbo"]['tokenizer']
-        def get_token_num(txt): return len(
-            enc.encode(txt, disallowed_special=()))
-        self.get_token_num = get_token_num
-
     def run_file_split(self, max_token_limit=1900):
         """
         将长文本分离开来
@@ -29,9 +22,8 @@ class PaperFileGroup():
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index])
             else:
-                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(
-                    file_content, self.get_token_num, max_token_limit)
+                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                 for j, segment in enumerate(segments):
                     self.sp_file_contents.append(segment)
                     self.sp_file_index.append(index)
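Under the old API the caller had to build a token-counting closure from the gpt-3.5-turbo tokenizer and pass it in; the new `breakdown_text_to_satisfy_token_limit` takes only the text and the limit, so the tokenizer lookup presumably moves inside the helper. A rough stand-in showing the new calling shape only; the whitespace-based splitting here is an assumption for the demo, not the real implementation:

```python
# Hypothetical stand-in for crazy_functions.pdf_fns.breakdown_txt.
# breakdown_text_to_satisfy_token_limit; tokens are approximated by words here,
# whereas the real helper is assumed to resolve a proper tokenizer itself.
def breakdown_text_to_satisfy_token_limit(txt, limit):
    words, segments, current = txt.split(), [], []
    for word in words:
        current.append(word)
        if len(current) >= limit:          # real code would count tokens, not words
            segments.append(" ".join(current))
            current = []
    if current:
        segments.append(" ".join(current))
    return segments

segments = breakdown_text_to_satisfy_token_limit("word " * 5000, limit=1900)
assert all(len(seg.split()) <= 1900 for seg in segments)
```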
@@ -117,7 +109,7 @@ def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

 @CatchException
-def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     chatbot.append([
         "函数插件功能?",
         "对IPynb文件进行解析。Contributor: codycjy."])
@@ -83,7 +83,8 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
         history=this_iteration_history_feed,   # 迭代之前的分析
         sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional)

-    summary = "请用一句话概括这些文件的整体功能"
+    diagram_code = make_diagram(this_iteration_files, result, this_iteration_history_feed)
+    summary = "请用一句话概括这些文件的整体功能。\n\n" + diagram_code
     summary_result = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=summary,
         inputs_show_user=summary,
@@ -104,9 +105,12 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
     chatbot.append(("完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面

+def make_diagram(this_iteration_files, result, this_iteration_history_feed):
+    from crazy_functions.diagram_fns.file_tree import build_file_tree_mermaid_diagram
+    return build_file_tree_mermaid_diagram(this_iteration_history_feed[0::2], this_iteration_history_feed[1::2], "项目示意图")
+
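The new `make_diagram` relies on `this_iteration_history_feed` alternating between per-file inputs (even slots) and the model's analyses (odd slots); the `[0::2]` / `[1::2]` slices split them back apart before handing them to `build_file_tree_mermaid_diagram`. A small self-contained check of that slicing, with made-up data:

```python
# Hypothetical feed: even indices are the prompts, odd indices the answers.
history_feed = [
    "分析 main.py",    "程序入口, 构建Gradio界面",
    "分析 toolbox.py", "通用工具函数与装饰器",
]
files, analyses = history_feed[0::2], history_feed[1::2]
assert files == ["分析 main.py", "分析 toolbox.py"]
assert analyses == ["程序入口, 构建Gradio界面", "通用工具函数与装饰器"]
```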
 @CatchException
-def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob
     file_manifest = [f for f in glob.glob('./*.py')] + \
@@ -119,7 +123,7 @@ def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

 @CatchException
-def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -137,7 +141,7 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

 @CatchException
-def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -155,7 +159,7 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

 @CatchException
-def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -175,7 +179,7 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

 @CatchException
-def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -197,7 +201,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system


 @CatchException
-def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -219,7 +223,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys


 @CatchException
-def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -248,7 +252,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s


 @CatchException
-def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -269,7 +273,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

 @CatchException
-def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -289,7 +293,7 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

 @CatchException
-def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -311,7 +315,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst


 @CatchException
-def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -331,7 +335,7 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s


 @CatchException
-def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     txt_pattern = plugin_kwargs.get("advanced_arg")
     txt_pattern = txt_pattern.replace(",", ",")
     # 将要匹配的模式(例如: *.c, *.cpp, *.py, config.toml)
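`解析任意code项目` reads its match patterns from `plugin_kwargs["advanced_arg"]` and first normalizes full-width commas, as the last hunk shows. A sketch of how such a pattern string plausibly becomes a list of glob patterns; only the `replace` step appears in the hunk, the split is an assumption:

```python
advanced_arg = "*.c,*.cpp, *.py,config.toml"        # note the full-width comma
txt_pattern = advanced_arg.replace(",", ",")         # normalize to ASCII commas
patterns = [p.strip() for p in txt_pattern.split(",") if p.strip()]
assert patterns == ["*.c", "*.cpp", "*.py", "config.toml"]
# each pattern could then feed glob.glob(os.path.join(project_folder, "**", p))
```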
@@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 import datetime
 @CatchException
-def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -10,7 +10,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     history = []    # 清空历史,以免输入溢出
     MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS')
@@ -32,7 +32,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt


 @CatchException
-def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -40,7 +40,7 @@ def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history,
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     history = []    # 清空历史,以免输入溢出

@@ -166,7 +166,7 @@ class InterviewAssistant(AliyunASR):


 @CatchException
-def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # pip install -U openai-whisper
     chatbot.append(["对话助手函数插件:使用时,双手离开鼠标键盘吧", "音频助手, 正在听您讲话(点击“停止”键可终止程序)..."])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -44,7 +44,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo


 @CatchException
-def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
@@ -132,7 +132,7 @@ def get_meta_information(url, chatbot, history):
     return profile

 @CatchException
-def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     disable_auto_promotion(chatbot=chatbot)
     # 基本信息:功能、贡献者
     chatbot.append([
@@ -11,7 +11,7 @@ import os


 @CatchException
-def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     if txt:
         show_say = txt
         prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
@@ -32,7 +32,7 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt


 @CatchException
-def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

@@ -1,19 +1,47 @@
 from toolbox import CatchException, update_ui
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 import datetime

+高阶功能模板函数示意图 = f"""
+```mermaid
+flowchart TD
+    %% <gpt_academic_hide_mermaid_code> 一个特殊标记,用于在生成mermaid图表时隐藏代码块
+    subgraph 函数调用["函数调用过程"]
+        AA["输入栏用户输入的文本(txt)"] --> BB["gpt模型参数(llm_kwargs)"]
+        BB --> CC["插件模型参数(plugin_kwargs)"]
+        CC --> DD["对话显示框的句柄(chatbot)"]
+        DD --> EE["对话历史(history)"]
+        EE --> FF["系统提示词(system_prompt)"]
+        FF --> GG["当前用户信息(web_port)"]
+
+        A["开始(查询5天历史事件)"]
+        A --> B["获取当前月份和日期"]
+        B --> C["生成历史事件查询提示词"]
+        C --> D["调用大模型"]
+        D --> E["更新界面"]
+        E --> F["记录历史"]
+        F --> |"下一天"| B
+    end
+```
+"""

 @CatchException
-def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
+    # 高阶功能模板函数示意图:https://mermaid.live/edit#pako:eNptk1tvEkEYhv8KmattQpvlvOyFCcdeeaVXuoYssBwie8gyhCIlqVoLhrbbtAWNUpEGUkyMEDW2Fmn_DDOL_8LZHdOwxrnamX3f7_3mmZk6yKhZCfAgV1KrmYKoQ9fDuKC4yChX0nld1Aou1JzjznQ5fWmejh8LYHW6vG2a47YAnlCLNSIRolnenKBXI_zRIBrcuqRT890u7jZx7zMDt-AaMbnW1--5olGiz2sQjwfoQxsZL0hxplSSU0-rop4vrzmKR6O2JxYjHmwcL2Y_HDatVMkXlf86YzHbGY9bO5j8XE7O8Nsbc3iNB3ukL2SMcH-XIQBgWoVOZzxuOxOJOyc63EPGV6ZQLENVrznViYStTiaJ2vw2M2d9bByRnOXkgCnXylCSU5quyto_IcmkbdvctELmJ-j1ASW3uB3g5xOmKqVTmqr_Na3AtuS_dtBFm8H90XJyHkDDT7S9xXWb4HGmRChx64AOL5HRpUm411rM5uh4H78Z4V7fCZzytjZz2seto9XaNPFue07clLaVZF8UNLygJ-VES8lah_n-O-5Ozc7-77NzJ0-K0yr0ZYrmHdqAk50t2RbA4qq9uNohBASw7YpSgaRkLWCCAtxAlnRZLGbJba9bPwUAC5IsCYAnn1kpJ1ZKUACC0iBSsQLVBzUlA3ioVyQ3qGhZEUrxokiehAz4nFgqk1VNVABfB1uAD_g2_AGPl-W8nMcbCvsDblADfNCz4feyobDPy3rYEMtxwYYbPFNVUoHdCPmDHBv2cP4AMfrCbiBli-Q-3afv0X6WdsIjW2-10fgDy1SAig
+
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
     plugin_kwargs   插件模型的参数,用于灵活调整复杂功能的各种参数
     chatbot         聊天显示框的句柄,用于显示给用户
     history         聊天历史,前情提要
     system_prompt   给gpt的静默提醒
-    web_port        当前软件运行的端口号
+    user_request    当前用户的请求信息(IP地址等)
     """
     history = []    # 清空历史,以免输入溢出
-    chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!"))
+    chatbot.append((
+        "您正在调用插件:历史上的今天",
+        "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!" + 高阶功能模板函数示意图))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
     for i in range(5):
         currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
@@ -27,3 +55,45 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         chatbot[-1] = (i_say, gpt_say)
         history.append(i_say);history.append(gpt_say)
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+
+
+PROMPT = """
+请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例:
+```mermaid
+graph TD
+    P(编程) --> L1(Python)
+    P(编程) --> L2(C)
+    P(编程) --> L3(C++)
+    P(编程) --> L4(Javascipt)
+    P(编程) --> L5(PHP)
+```
+"""
+@CatchException
+def 测试图表渲染(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+    """
+    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
+    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
+    plugin_kwargs   插件模型的参数,用于灵活调整复杂功能的各种参数
+    chatbot         聊天显示框的句柄,用于显示给用户
+    history         聊天历史,前情提要
+    system_prompt   给gpt的静默提醒
+    user_request    当前用户的请求信息(IP地址等)
+    """
+    history = []    # 清空历史,以免输入溢出
+    chatbot.append(("这是什么功能?", "一个测试mermaid绘制图表的功能,您可以在输入框中输入一些关键词,然后使用mermaid+llm绘制图表。"))
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新

+    if txt == "": txt = "空白的输入栏"    # 调皮一下
+
+    i_say_show_user = f'请绘制有关“{txt}”的逻辑关系图。'
+    i_say = PROMPT.format(subject=txt)
+    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+        inputs=i_say,
+        inputs_show_user=i_say_show_user,
+        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
+        sys_prompt=""
+    )
+    history.append(i_say); history.append(gpt_say)
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
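The new `测试图表渲染` plugin builds its model prompt by substituting the user's keyword into the module-level `PROMPT` template. A dry run of that assembly, using a trimmed copy of the template and a hypothetical subject:

```python
# Trimmed copy of the PROMPT template added above, for demonstration only.
PROMPT = """
请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法
"""

txt = "深度学习"                                     # hypothetical user input
if txt == "": txt = "空白的输入栏"                    # same fallback as the plugin
i_say = PROMPT.format(subject=txt)                   # full prompt sent to the model
i_say_show_user = f'请绘制有关“{txt}”的逻辑关系图。'   # short form shown in the UI
assert "深度学习" in i_say
```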
@@ -229,4 +229,3 @@ services:
       # 不使用代理网络拉取最新代码
       command: >
         bash -c "python3 -u main.py"
-
@@ -1,2 +1 @@
 # 此Dockerfile不再维护,请前往docs/GithubAction+ChatGLM+Moss
-
@@ -0,0 +1,53 @@
+# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacity --network=host --build-arg http_proxy=http://localhost:10881 --build-arg https_proxy=http://localhost:10881 .
+# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacityBeta --network=host .
+# docker run -it --net=host gpt-academic-all-capacity bash
+
+# 从NVIDIA源,从而支持显卡(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
+FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest
+
+# use python3 as the system default python
+WORKDIR /gpt
+RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
+
+# # 非必要步骤,更换pip源 (以下三行,可以删除)
+# RUN echo '[global]' > /etc/pip.conf && \
+#     echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
+#     echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
+
+# 下载pytorch
+RUN python3 -m pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
+# 准备pip依赖
+RUN python3 -m pip install openai numpy arxiv rich
+RUN python3 -m pip install colorama Markdown pygments pymupdf
+RUN python3 -m pip install python-docx moviepy pdfminer
+RUN python3 -m pip install zh_langchain==0.2.1 pypinyin
+RUN python3 -m pip install rarfile py7zr
+RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
+# 下载分支
+WORKDIR /gpt
+RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
+WORKDIR /gpt/gpt_academic
+RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss
+
+RUN python3 -m pip install -r requirements.txt
+RUN python3 -m pip install -r request_llms/requirements_moss.txt
+RUN python3 -m pip install -r request_llms/requirements_qwen.txt
+RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
+RUN python3 -m pip install -r request_llms/requirements_newbing.txt
+RUN python3 -m pip install nougat-ocr
+
+# 预热Tiktoken模块
+RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
+
+# 安装知识库插件的额外依赖
+RUN apt-get update && apt-get install libgl1 -y
+RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
+RUN pip3 install unstructured[all-docs] --upgrade
+RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'
+RUN rm -rf /usr/local/lib/python3.8/dist-packages/tests
+
+
+# COPY .cache /root/.cache
+# COPY config_private.py config_private.py
+# 启动
+CMD ["python3", "-u", "main.py"]
@@ -0,0 +1,26 @@
+# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM
+# 如何构建: 先修改 `config.py`, 然后 docker build -t gpt-academic-nolocal-vs -f docs/GithubAction+NoLocal+Vectordb .
+# 如何运行: docker run --rm -it --net=host gpt-academic-nolocal-vs
+FROM python:3.11
+
+# 指定路径
+WORKDIR /gpt
+
+# 装载项目文件
+COPY . .
+
+# 安装依赖
+RUN pip3 install -r requirements.txt
+
+# 安装知识库插件的额外依赖
+RUN apt-get update && apt-get install libgl1 -y
+RUN pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
+RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
+RUN pip3 install unstructured[all-docs] --upgrade
+RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'
+
+# 可选步骤,用于预热模块
+RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
+
+# 启动
+CMD ["python3", "-u", "main.py"]
@@ -341,4 +341,3 @@ https://github.com/oobabooga/one-click-installers
 # المزيد:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-
@@ -355,4 +355,3 @@ https://github.com/oobabooga/one-click-installers
 # More:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-
@@ -354,4 +354,3 @@ https://github.com/oobabooga/one-click-installers
 # Plus:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-
@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
 # Weitere:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-
@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
 # Altre risorse:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-
@@ -342,4 +342,3 @@ https://github.com/oobabooga/one-click-installers
 # その他:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-
@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
 # 더보기:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-
@@ -355,4 +355,3 @@ https://github.com/oobabooga/instaladores-de-um-clique
 # Mais:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-
@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
 # Больше:
 https://github.com/gradio-app/gradio
 https://github.com/fghrsh/live2d_demo
-
Binary file not shown.
@@ -165,7 +165,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和
 3. read_file_to_chat(chatbot, history, file_name):从传入的文件中读取内容,解析出对话历史记录并更新聊天显示框。

-4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):一个主要函数,用于保存当前对话记录并提醒用户。如果用户希望加载历史记录,则调用read_file_to_chat()来更新聊天显示框。如果用户希望删除历史记录,调用删除所有本地对话历史记录()函数完成删除操作。
+4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):一个主要函数,用于保存当前对话记录并提醒用户。如果用户希望加载历史记录,则调用read_file_to_chat()来更新聊天显示框。如果用户希望删除历史记录,调用删除所有本地对话历史记录()函数完成删除操作。

 ## [19/48] 请对下面的程序文件做一个概述: crazy_functions\总结word文档.py

@@ -7,13 +7,27 @@ sample = """
 """
 import re

+
 def preprocess_newbing_out(s):
-    pattern = r'\^(\d+)\^' # 匹配^数字^
-    pattern2 = r'\[(\d+)\]' # 匹配^数字^
-    sub = lambda m: '\['+m.group(1)+'\]' # 将匹配到的数字作为替换值
-    result = re.sub(pattern, sub, s) # 替换操作
-    if '[1]' in result:
-        result += '<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>' + "<br/>".join([re.sub(pattern2, sub, r) for r in result.split('\n') if r.startswith('[')]) + '</small>'
+    pattern = r"\^(\d+)\^"  # 匹配^数字^
+    pattern2 = r"\[(\d+)\]"  # 匹配^数字^
+
+    def sub(m):
+        return "\\[" + m.group(1) + "\\]"  # 将匹配到的数字作为替换值
+
+    result = re.sub(pattern, sub, s)  # 替换操作
+    if "[1]" in result:
+        result += (
+            '<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>'
+            + "<br/>".join(
+                [
+                    re.sub(pattern2, sub, r)
+                    for r in result.split("\n")
+                    if r.startswith("[")
+                ]
+            )
+            + "</small>"
+        )
     return result

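The refactor replaces the lambda with a named `sub` and double-quoted, escape-safe strings without changing behavior: `^n^` citation markers become `\[n\]`. A condensed doctest-style check of that first substitution (the footnote-list branch is omitted):

```python
import re

def rewrite_citation_markers(s):
    # condensed from preprocess_newbing_out above: ^1^ -> \[1\]
    def sub(m):
        return "\\[" + m.group(1) + "\\]"
    return re.sub(r"\^(\d+)\^", sub, s)

assert rewrite_citation_markers("结论成立^1^") == "结论成立\\[1\\]"
```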
@@ -28,37 +42,39 @@ def close_up_code_segment_during_stream(gpt_reply):
         str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。

     """
-    if '```' not in gpt_reply:
+    if "```" not in gpt_reply:
         return gpt_reply
-    if gpt_reply.endswith('```'):
+    if gpt_reply.endswith("```"):
         return gpt_reply

     # 排除了以上两个情况,我们
-    segments = gpt_reply.split('```')
+    segments = gpt_reply.split("```")
     n_mark = len(segments) - 1
     if n_mark % 2 == 1:
         # print('输出代码片段中!')
-        return gpt_reply+'\n```'
+        return gpt_reply + "\n```"
     else:
         return gpt_reply

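`close_up_code_segment_during_stream` guards streamed replies: an odd count of code-fence marks means a code block is still open mid-stream, so a closing fence is appended before rendering. A condensed check of that behavior (fence marks are built with `"`" * 3` to keep the demo self-contained):

```python
def close_fence(gpt_reply):
    # condensed from close_up_code_segment_during_stream above
    mark = "`" * 3
    if mark not in gpt_reply or gpt_reply.endswith(mark):
        return gpt_reply
    if (len(gpt_reply.split(mark)) - 1) % 2 == 1:   # a fence is still open
        return gpt_reply + "\n" + mark
    return gpt_reply

assert close_fence("代码如下:\n" + "`" * 3 + "python\nprint(1)").endswith("`" * 3)
```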
 import markdown
 from latex2mathml.converter import convert as tex2mathml
-from functools import wraps, lru_cache

 def markdown_convertion(txt):
     """
     将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
     """
     pre = '<div class="markdown-body">'
-    suf = '</div>'
+    suf = "</div>"
     if txt.startswith(pre) and txt.endswith(suf):
         # print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
         return txt  # 已经被转化过,不需要再次转化

     markdown_extension_configs = {
-        'mdx_math': {
-            'enable_dollar_delimiter': True,
-            'use_gitlab_delimiters': False,
+        "mdx_math": {
+            "enable_dollar_delimiter": True,
+            "use_gitlab_delimiters": False,
         },
     }
     find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
@@ -72,19 +88,19 @@ def markdown_convertion(txt):

     def replace_math_no_render(match):
         content = match.group(1)
-        if 'mode=display' in match.group(0):
-            content = content.replace('\n', '</br>')
-            return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
+        if "mode=display" in match.group(0):
+            content = content.replace("\n", "</br>")
+            return f'<font color="#00FF00">$$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$$</font>'
         else:
-            return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"
+            return f'<font color="#00FF00">$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$</font>'

     def replace_math_render(match):
         content = match.group(1)
-        if 'mode=display' in match.group(0):
-            if '\\begin{aligned}' in content:
-                content = content.replace('\\begin{aligned}', '\\begin{array}')
-                content = content.replace('\\end{aligned}', '\\end{array}')
-                content = content.replace('&', ' ')
+        if "mode=display" in match.group(0):
+            if "\\begin{aligned}" in content:
+                content = content.replace("\\begin{aligned}", "\\begin{array}")
+                content = content.replace("\\end{aligned}", "\\end{array}")
+                content = content.replace("&", " ")
             content = tex2mathml_catch_exception(content, display="block")
             return content
         else:
@@ -94,37 +110,58 @@ def markdown_convertion(txt):
         """
         解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
         """
-        content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
-        content = content.replace('</script>\n</script>', '</script>')
+        content = content.replace(
+            '<script type="math/tex">\n<script type="math/tex; mode=display">',
+            '<script type="math/tex; mode=display">',
+        )
+        content = content.replace("</script>\n</script>", "</script>")
         return content

-    if ('$' in txt) and ('```' not in txt): # 有$标识的公式符号,且没有代码段```的标识
+    if ("$" in txt) and ("```" not in txt):  # 有$标识的公式符号,且没有代码段```的标识
         # convert everything to html format
-        split = markdown.markdown(text='---')
-        convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
+        split = markdown.markdown(text="---")
+        convert_stage_1 = markdown.markdown(
+            text=txt,
+            extensions=["mdx_math", "fenced_code", "tables", "sane_lists"],
+            extension_configs=markdown_extension_configs,
+        )
         convert_stage_1 = markdown_bug_hunt(convert_stage_1)
         # re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
         # 1. convert to easy-to-copy tex (do not render math)
-        convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
+        convert_stage_2_1, n = re.subn(
+            find_equation_pattern,
+            replace_math_no_render,
+            convert_stage_1,
+            flags=re.DOTALL,
+        )
         # 2. convert to rendered equation
-        convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
+        convert_stage_2_2, n = re.subn(
+            find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL
+        )
         # cat them together
-        return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
+        return pre + convert_stage_2_1 + f"{split}" + convert_stage_2_2 + suf
     else:
-        return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
+        return (
+            pre
+            + markdown.markdown(
+                txt, extensions=["fenced_code", "codehilite", "tables", "sane_lists"]
+            )
+            + suf
+        )

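The reformatted calls keep the two-pass design: stage 1 lets `mdx_math` wrap each formula in a `<script type="math/tex">` tag, and the two `re.subn` passes then derive a copyable-TeX variant and a rendered variant from the same stage-1 HTML. A minimal check of the pattern against a synthetic stage-1 fragment, using a lambda as a stand-in for `replace_math_no_render`:

```python
import re

find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
stage_1 = 'Energy: <script type="math/tex">E=mc^2</script>'

# stand-in for replace_math_no_render: keep the formula as copyable TeX
copyable, n = re.subn(find_equation_pattern,
                      lambda m: "$" + m.group(1) + "$",
                      stage_1, flags=re.DOTALL)
assert copyable == "Energy: $E=mc^2$" and n == 1
```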
 sample = preprocess_newbing_out(sample)
 sample = close_up_code_segment_during_stream(sample)
 sample = markdown_convertion(sample)
-with open('tmp.html', 'w', encoding='utf8') as f:
-    f.write("""
+with open("tmp.html", "w", encoding="utf8") as f:
+    f.write(
+        """

 <head>
     <title>My Website</title>
     <link rel="stylesheet" type="text/css" href="style.css">
 </head>

-""")
+"""
+    )
     f.write(sample)
@@ -923,7 +923,7 @@
     "的第": "The",
     "个片段": "fragment",
     "总结文章": "Summarize the article",
-    "根据以上的对话": "According to the above dialogue",
+    "根据以上的对话": "According to the conversation above",
     "的主要内容": "The main content of",
     "所有文件都总结完成了吗": "Are all files summarized?",
     "如果是.doc文件": "If it is a .doc file",
@@ -1501,7 +1501,7 @@
     "发送请求到OpenAI后": "After sending the request to OpenAI",
     "上下布局": "Vertical Layout",
    "左右布局": "Horizontal Layout",
-    "对话窗的高度": "Height of the Dialogue Window",
+    "对话窗的高度": "Height of the Conversation Window",
     "重试的次数限制": "Retry Limit",
     "gpt4现在只对申请成功的人开放": "GPT-4 is now only open to those who have successfully applied",
     "提高限制请查询": "Please check for higher limits",
@@ -2183,9 +2183,8 @@
     "找不到合适插件执行该任务": "Cannot find a suitable plugin to perform this task",
     "接驳VoidTerminal": "Connect to VoidTerminal",
     "**很好": "**Very good",
-    "对话|编程": "Conversation|Programming",
-    "对话|编程|学术": "Conversation|Programming|Academic",
-    "4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model",
+    "对话|编程": "Conversation&ImageGenerating|Programming",
+    "对话|编程|学术": "Conversation&ImageGenerating|Programming|Academic", "4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model",
     "「请调用插件翻译PDF论文": "Please call the plugin to translate the PDF paper",
     "3. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词": "3. If you use keywords such as 'call plugin xxx', 'modify configuration xxx', 'please', etc.",
     "以下是一篇学术论文的基本信息": "The following is the basic information of an academic paper",
@@ -2630,7 +2629,7 @@
     "已经被记忆": "Already memorized",
     "默认用英文的": "Default to English",
     "错误追踪": "Error tracking",
-    "对话|编程|学术|智能体": "Dialogue|Programming|Academic|Intelligent agent",
+    "对话&编程|编程|学术|智能体": "Conversation&ImageGenerating|Programming|Academic|Intelligent agent",
     "请检查": "Please check",
     "检测到被滞留的缓存文档": "Detected cached documents being left behind",
     "还有哪些场合允许使用代理": "What other occasions allow the use of proxies",
@@ -2864,7 +2863,7 @@
     "加载API_KEY": "Loading API_KEY",
     "协助您编写代码": "Assist you in writing code",
     "我可以为您提供以下服务": "I can provide you with the following services",
-    "排队中请稍后 ...": "Please wait in line ...",
+    "排队中请稍候 ...": "Please wait in line ...",
     "建议您使用英文提示词": "It is recommended to use English prompts",
     "不能支撑AutoGen运行": "Cannot support AutoGen operation",
     "帮助您解决编程问题": "Help you solve programming problems",
@@ -2903,5 +2902,107 @@
     "高优先级": "High priority",
     "请配置ZHIPUAI_API_KEY": "Please configure ZHIPUAI_API_KEY",
     "单个azure模型": "Single Azure model",
-    "预留参数 context 未实现": "Reserved parameter 'context' not implemented"
+    "预留参数 context 未实现": "Reserved parameter 'context' not implemented",
+    "在输入区输入临时API_KEY后提交": "Submit after entering temporary API_KEY in the input area",
+    "鸟": "Bird",
+    "图片中需要修改的位置用橡皮擦擦除为纯白色": "Erase the areas in the image that need to be modified with an eraser to pure white",
+    "└── PDF文档精准解析": "└── Accurate parsing of PDF documents",
+    "└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置": "└── ALLOW_RESET_CONFIG Whether to allow modifying the configuration of this page through natural language description",
+    "等待指令": "Waiting for instructions",
+    "不存在": "Does not exist",
+    "选择游戏": "Select game",
+    "本地大模型示意图": "Local large model diagram",
+    "无视此消息即可": "You can ignore this message",
+    "即RGB=255": "That is, RGB=255",
+    "如需追问": "If you have further questions",
+    "也可以是具体的模型路径": "It can also be a specific model path",
+    "才会起作用": "Will take effect",
+    "下载失败": "Download failed",
+    "网页刷新后失效": "Invalid after webpage refresh",
+    "crazy_functions.互动小游戏-": "crazy_functions.Interactive mini game-",
+    "右对齐": "Right alignment",
+    "您可以调用下拉菜单中的“LoadConversationHistoryArchive”还原当下的对话": "You can use the 'LoadConversationHistoryArchive' in the drop-down menu to restore the current conversation",
+    "左对齐": "Left alignment",
+    "使用默认的 FP16": "Use default FP16",
+    "一小时": "One hour",
+    "从而方便内存的释放": "Thus facilitating memory release",
+    "如何临时更换API_KEY": "How to temporarily change API_KEY",
+    "请输入 1024x1024-HD": "Please enter 1024x1024-HD",
+    "使用 INT8 量化": "Use INT8 quantization",
+    "3. 输入修改需求": "3. Enter modification requirements",
+    "刷新界面 由于请求gpt需要一段时间": "Refreshing the interface takes some time due to the request for gpt",
+    "随机小游戏": "Random mini game",
+    "那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型": "So please specify the specific model in QWEN_MODEL_SELECTION below",
+    "表值": "Table value",
+    "我画你猜": "I draw, you guess",
+    "狗": "Dog",
+    "2. 输入分辨率": "2. Enter resolution",
+    "鱼": "Fish",
+    "尚未完成": "Not yet completed",
+    "表头": "Table header",
+    "填localhost或者127.0.0.1": "Fill in localhost or 127.0.0.1",
+    "请上传jpg格式的图片": "Please upload images in jpg format",
+    "API_URL_REDIRECT填写格式是错误的": "The format of API_URL_REDIRECT is incorrect",
+    "├── RWKV的支持见Wiki": "Support for RWKV is available in the Wiki",
+    "如果中文Prompt效果不理想": "If the Chinese prompt is not effective",
+    "/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix": "/SEAFILE_LOCAL/50503047/My Library/Degree/paperlatex/aaai/Fu_8368_with_appendix",
+    "只有当AVAIL_LLM_MODELS包含了对应本地模型时": "Only when AVAIL_LLM_MODELS contains the corresponding local model",
+    "选择本地模型变体": "Choose the local model variant",
+    "如果您确信自己没填错": "If you are sure you haven't made a mistake",
+    "PyPDF2这个库有严重的内存泄露问题": "PyPDF2 library has serious memory leak issues",
+    "整理文件集合 输出消息": "Organize file collection and output message",
+    "没有检测到任何近期上传的图像文件": "No recently uploaded image files detected",
+    "游戏结束": "Game over",
+    "调用结束": "Call ended",
+    "猫": "Cat",
+    "请及时切换模型": "Please switch models in time",
+    "次中": "In the meantime",
+    "如需生成高清图像": "If you need to generate high-definition images",
+    "CPU 模式": "CPU mode",
+    "项目目录": "Project directory",
+    "动物": "Animal",
+    "居中对齐": "Center alignment",
+    "请注意拓展名需要小写": "Please note that the extension name needs to be lowercase",
+    "重试第": "Retry",
+    "实验性功能": "Experimental feature",
+    "猜错了": "Wrong guess",
+    "打开你的代理软件查看代理协议": "Open your proxy software to view the proxy agreement",
+    "您不需要再重复强调该文件的路径了": "You don't need to emphasize the file path again",
+    "请阅读": "Please read",
+    "请直接输入您的问题": "Please enter your question directly",
+    "API_URL_REDIRECT填错了": "API_URL_REDIRECT is filled incorrectly",
+    "谜底是": "The answer is",
+    "第一个模型": "The first model",
+    "你猜对了!": "You guessed it right!",
+    "已经接收到您上传的文件": "The file you uploaded has been received",
+    "您正在调用“图像生成”插件": "You are calling the 'Image Generation' plugin",
+    "刷新界面 界面更新": "Refresh the interface, interface update",
+    "如果之前已经初始化了游戏实例": "If the game instance has been initialized before",
+    "文件": "File",
+    "老鼠": "Mouse",
+    "列2": "Column 2",
+    "等待图片": "Waiting for image",
+    "使用 INT4 量化": "Use INT4 quantization",
+    "from crazy_functions.互动小游戏 import 随机小游戏": "TranslatedText",
+    "游戏主体": "TranslatedText",
+    "该模型不具备上下文对话能力": "TranslatedText",
+    "列3": "TranslatedText",
+    "清理": "TranslatedText",
+    "检查量化配置": "TranslatedText",
+    "如果游戏结束": "TranslatedText",
+    "蛇": "TranslatedText",
+    "则继续该实例;否则重新初始化": "TranslatedText",
+    "e.g. cat and 猫 are the same thing": "TranslatedText",
+    "第三个模型": "TranslatedText",
+    "如果你选择Qwen系列的模型": "TranslatedText",
+    "列4": "TranslatedText",
+    "输入“exit”获取答案": "TranslatedText",
+    "把它放到子进程中运行": "TranslatedText",
+    "列1": "TranslatedText",
+    "使用该模型需要额外依赖": "TranslatedText",
+    "再试试": "TranslatedText",
+    "1. 上传图片": "TranslatedText",
+    "保存状态": "TranslatedText",
+    "GPT-Academic对话存档": "TranslatedText",
+    "Arxiv论文精细翻译": "TranslatedText"
 }
@@ -1043,9 +1043,9 @@
     "jittorllms响应异常": "jittorllms response exception",
     "在项目根目录运行这两个指令": "Run these two commands in the project root directory",
     "获取tokenizer": "Get tokenizer",
-    "chatbot 为WebUI中显示的对话列表": "chatbot is the list of dialogues displayed in WebUI",
+    "chatbot 为WebUI中显示的对话列表": "chatbot is the list of conversations displayed in WebUI",
     "test_解析一个Cpp项目": "test_parse a Cpp project",
-    "将对话记录history以Markdown格式写入文件中": "Write the dialogue record history to a file in Markdown format",
+    "将对话记录history以Markdown格式写入文件中": "Write the conversations record history to a file in Markdown format",
     "装饰器函数": "Decorator function",
     "玫瑰色": "Rose color",
     "将单空行": "刪除單行空白",
@@ -61,4 +61,3 @@ VI 两种音频监听模式切换时,需要刷新页面才有效。
 VII 非localhost运行+非https情况下无法打开录音功能的坑:https://blog.csdn.net/weixin_39461487/article/details/109594434

 ## 5.点击函数插件区“实时音频采集” 或者其他音频交互功能
-
@@ -258,39 +258,7 @@ function loadTipsMessage(result) {
     });

     window.showWelcomeMessage = function(result) {
-        var text;
-        if (window.location.href == live2d_settings.homePageUrl) {
-            var now = (new Date()).getHours();
-            if (now > 23 || now <= 5) text = getRandText(result.waifu.hour_tips['t23-5']);
-            else if (now > 5 && now <= 7) text = getRandText(result.waifu.hour_tips['t5-7']);
-            else if (now > 7 && now <= 11) text = getRandText(result.waifu.hour_tips['t7-11']);
-            else if (now > 11 && now <= 14) text = getRandText(result.waifu.hour_tips['t11-14']);
-            else if (now > 14 && now <= 17) text = getRandText(result.waifu.hour_tips['t14-17']);
-            else if (now > 17 && now <= 19) text = getRandText(result.waifu.hour_tips['t17-19']);
-            else if (now > 19 && now <= 21) text = getRandText(result.waifu.hour_tips['t19-21']);
-            else if (now > 21 && now <= 23) text = getRandText(result.waifu.hour_tips['t21-23']);
-            else text = getRandText(result.waifu.hour_tips.default);
-        } else {
-            var referrer_message = result.waifu.referrer_message;
-            if (document.referrer !== '') {
-                var referrer = document.createElement('a');
-                referrer.href = document.referrer;
-                var domain = referrer.hostname.split('.')[1];
-                if (window.location.hostname == referrer.hostname)
-                    text = referrer_message.localhost[0] + document.title.split(referrer_message.localhost[2])[0] + referrer_message.localhost[1];
-                else if (domain == 'baidu')
-                    text = referrer_message.baidu[0] + referrer.search.split('&wd=')[1].split('&')[0] + referrer_message.baidu[1];
-                else if (domain == 'so')
-                    text = referrer_message.so[0] + referrer.search.split('&q=')[1].split('&')[0] + referrer_message.so[1];
-                else if (domain == 'google')
-                    text = referrer_message.google[0] + document.title.split(referrer_message.google[2])[0] + referrer_message.google[1];
-                else {
-                    $.each(result.waifu.referrer_hostname, function(i,val) {if (i==referrer.hostname) referrer.hostname = getRandText(val)});
-                    text = referrer_message.default[0] + referrer.hostname + referrer_message.default[1];
-                }
-            } else text = referrer_message.none[0] + document.title.split(referrer_message.none[2])[0] + referrer_message.none[1];
-        }
-        showMessage(text, 6000);
+        showMessage('欢迎使用GPT-Academic', 6000);
     }; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result);

     var waifu_tips = result.waifu;
@@ -83,8 +83,8 @@
     "很多强大的函数插件隐藏在下拉菜单中呢。",
     "红色的插件,使用之前需要把文件上传进去哦。",
     "想添加功能按钮吗?读读readme很容易就学会啦。",
-    "敏感或机密的信息,不可以问chatGPT的哦!",
-    "chatGPT究竟是划时代的创新,还是扼杀创造力的毒药呢?"
+    "敏感或机密的信息,不可以问AI的哦!",
+    "LLM究竟是划时代的创新,还是扼杀创造力的毒药呢?"
     ] }
 ],
 "click": [
@@ -92,8 +92,6 @@
     "selector": ".waifu #live2d",
     "text": [
     "是…是不小心碰到了吧",
-    "萝莉控是什么呀",
-    "你看到我的小熊了吗",
     "再摸的话我可要报警了!⌇●﹏●⌇",
     "110吗,这里有个变态一直在摸我(ó﹏ò。)"
     ]
main.py (126 changed lines)
@@ -1,14 +1,25 @@
 import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
-import pickle
-import base64
+help_menu_description = \
+"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
+感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors).
+</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki),
+如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
+</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交
+</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮
+</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮
+</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端
+</br></br>如何保存对话: 点击保存当前的对话按钮
+</br></br>如何语音对话: 请阅读Wiki
+</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""

 def main():
 import gradio as gr
-if gr.__version__ not in ['3.32.6']:
+if gr.__version__ not in ['3.32.6', '3.32.7']:
 raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
 from request_llms.bridge_all import predict
 from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
-# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
+# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
 proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
 CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
 ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
@@ -18,20 +29,10 @@ def main():
 # 如果WEB_PORT是-1, 则随机选取WEB端口
 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
 from check_proxy import get_current_version
-from themes.theme import adjust_theme, advanced_css, theme_declaration, load_dynamic_theme
+from themes.theme import adjust_theme, advanced_css, theme_declaration
+from themes.theme import js_code_for_css_changing, js_code_for_darkmode_init, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
+from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie
 title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
-description = "Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), "
-description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)."
-description += "</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki), "
-description += "如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues)."
-description += "</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交"
-description += "</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮"
-description += "</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮"
-description += "</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端"
-description += "</br></br>如何保存对话: 点击保存当前的对话按钮"
-description += "</br></br>如何语音对话: 请阅读Wiki"
-description += "</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"

 # 问询记录, python 版本建议3.9+(越新越好)
 import logging, uuid
@@ -85,7 +86,7 @@ def main():
 with gr_L2(scale=1, elem_id="gpt-panel"):
 with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
 with gr.Row():
-txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
+txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
 with gr.Row():
 submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
 with gr.Row():
@@ -138,17 +139,17 @@ def main():
 with gr.Row():
 switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
 with gr.Row():
-with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up:
+with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
 file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")


-with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden"):
+with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
 with gr.Row():
 with gr.Tab("上传文件", elem_id="interact-panel"):
 gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
-file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple")
+file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")

-with gr.Tab("更换模型 & Prompt", elem_id="interact-panel"):
+with gr.Tab("更换模型", elem_id="interact-panel"):
 md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
 top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
 temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
@@ -160,41 +161,25 @@ def main():
 checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
 value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
 checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
-value=[], label="显示/隐藏自定义菜单", elem_id='cbs').style(container=False)
+value=[], label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
 dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
-dark_mode_btn.click(None, None, None, _js="""() => {
-if (document.querySelectorAll('.dark').length) {
-document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
-} else {
-document.querySelector('body').classList.add('dark');
-}
-}""",
-)
+dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
 with gr.Tab("帮助", elem_id="interact-panel"):
-gr.Markdown(description)
+gr.Markdown(help_menu_description)

 with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_input_secondary:
 with gr.Accordion("浮动输入区", open=True, elem_id="input-panel2"):
 with gr.Row() as row:
 row.style(equal_height=True)
 with gr.Column(scale=10):
-txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", lines=8, label="输入区2").style(container=False)
+txt2 = gr.Textbox(show_label=False, placeholder="Input question here.",
+elem_id='user_input_float', lines=8, label="输入区2").style(container=False)
 with gr.Column(scale=1, min_width=40):
 submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
 resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
 stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
 clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")

-def to_cookie_str(d):
-# Pickle the dictionary and encode it as a string
-pickled_dict = pickle.dumps(d)
-cookie_value = base64.b64encode(pickled_dict).decode('utf-8')
-return cookie_value
-
-def from_cookie_str(c):
-# Decode the base64-encoded string and unpickle it into a dictionary
-pickled_dict = base64.b64decode(c.encode('utf-8'))
-return pickle.loads(pickled_dict)
-
 with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
 with gr.Accordion("自定义菜单", open=True, elem_id="edit-panel"):
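上面这段 diff 把 `to_cookie_str` / `from_cookie_str` 从 main.py 中删去(前文可见它们改由 `themes.theme` 统一提供)。这对函数的作用是把任意字典经 pickle 序列化、再 base64 编码成可写入浏览器 cookie 的纯文本。下面是按被删代码复原的独立示意(仅演示往返编解码,非项目源文件):

```python
import pickle, base64

def to_cookie_str(d):
    # 将字典 pickle 序列化, 再用 base64 编码为纯文本, 便于写入 cookie
    pickled_dict = pickle.dumps(d)
    return base64.b64encode(pickled_dict).decode('utf-8')

def from_cookie_str(c):
    # 逆过程: base64 解码后反序列化回字典
    pickled_dict = base64.b64decode(c.encode('utf-8'))
    return pickle.loads(pickled_dict)

cookie = to_cookie_str({'custom_bnt': {'按钮1': {'Title': '翻译'}}})
assert from_cookie_str(cookie) == {'custom_bnt': {'按钮1': {'Title': '翻译'}}}
```

需要注意 pickle 反序列化不可信数据存在安全隐患;此处 cookie 由本服务自己写入、自己读取,属于可控输入。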
@@ -226,11 +211,11 @@ def main():
 else:
 ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
 ret.update({cookies: cookies_})
 try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
 except: persistent_cookie_ = {}
 persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value
 persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
 ret.update({persistent_cookie: persistent_cookie_}) # write persistent cookie
 return ret

 def reflesh_btn(persistent_cookie_, cookies_):
@@ -251,10 +236,11 @@ def main():
 else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
 return ret

-basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies],[cookies, *customize_btns.values(), *predefined_btns.values()])
+basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
 h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
 [persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
-h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""") # save persistent cookie
+# save persistent cookie
+h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")

 # 功能区显示开关与功能区的互动
 def fn_area_visibility(a):
@@ -304,8 +290,8 @@ def main():
 click_handle = btn.click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(btn.value)], outputs=output_combo)
 cancel_handles.append(click_handle)
 # 文件上传区,接收文件后与chatbot的互动
-file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
+file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
-file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
+file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
 # 函数插件-固定按钮区
 for k in plugins:
 if not plugins[k].get("AsButton", True): continue
@@ -341,18 +327,7 @@ def main():
 None,
 [secret_css],
 None,
-_js="""(css) => {
-var existingStyles = document.querySelectorAll("style[data-loaded-css]");
-for (var i = 0; i < existingStyles.length; i++) {
-var style = existingStyles[i];
-style.parentNode.removeChild(style);
-}
-var styleElement = document.createElement('style');
-styleElement.setAttribute('data-loaded-css', css);
-styleElement.innerHTML = css;
-document.head.appendChild(styleElement);
-}
-"""
+_js=js_code_for_css_changing
 )
 # 随变按钮的回调函数注册
 def route(request: gr.Request, k, *args, **kwargs):
@@ -384,27 +359,10 @@ def main():
 rad.feed(cookies['uuid'].hex, audio)
 audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])

-def init_cookie(cookies, chatbot):
-# 为每一位访问的用户赋予一个独一无二的uuid编码
-cookies.update({'uuid': uuid.uuid4()})
-return cookies
 demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
-darkmode_js = """(dark) => {
-dark = dark == "True";
-if (document.querySelectorAll('.dark').length) {
-if (!dark){
-document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
-}
-} else {
-if (dark){
-document.querySelector('body').classList.add('dark');
-}
-}
-}"""
-load_cookie_js = """(persistent_cookie) => {
-return getCookie("persistent_cookie");
-}"""
-demo.load(None, inputs=None, outputs=[persistent_cookie], _js=load_cookie_js)
+darkmode_js = js_code_for_darkmode_init
+demo.load(None, inputs=None, outputs=[persistent_cookie], _js=js_code_for_persistent_cookie_init)
 demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题
 demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')

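这一处重构把原先内联在 main.py 里的 `_js` 脚本字符串(暗色主题初始化、持久化 cookie 读取等)收拢为 `themes/theme.py` 中的具名常量。下面按本次 diff 被删除的 `load_cookie_js` 给出其中一个常量的复原草图(常量名以上方 import 为准,内容与实际文件可能略有出入):

```python
# themes/theme.py 中集中维护前端脚本常量的示意
js_code_for_persistent_cookie_init = """(persistent_cookie) => {
    return getCookie("persistent_cookie");
}"""
```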
@@ -417,7 +375,7 @@ def main():

 def auto_updates(): time.sleep(0); auto_update()
 def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
-def warm_up_mods(): time.sleep(4); warm_up_modules()
+def warm_up_mods(): time.sleep(6); warm_up_modules()

 threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
 threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
@@ -182,12 +182,12 @@ cached_translation = read_map_from_json(language=LANG)
 def trans(word_to_translate, language, special=False):
 if len(word_to_translate) == 0: return {}
 from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-from toolbox import get_conf, ChatBotWithCookies
+from toolbox import get_conf, ChatBotWithCookies, load_chat_cookies
-proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
-get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
+cookies = load_chat_cookies()
 llm_kwargs = {
-'api_key': API_KEY,
+'api_key': cookies['api_key'],
-'llm_model': LLM_MODEL,
+'llm_model': cookies['llm_model'],
 'top_p':1.0,
 'max_length': None,
 'temperature':0.4,
@@ -245,15 +245,15 @@ def trans(word_to_translate, language, special=False):
 def trans_json(word_to_translate, language, special=False):
 if len(word_to_translate) == 0: return {}
 from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-from toolbox import get_conf, ChatBotWithCookies
+from toolbox import get_conf, ChatBotWithCookies, load_chat_cookies
-proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
-get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
+cookies = load_chat_cookies()
 llm_kwargs = {
-'api_key': API_KEY,
+'api_key': cookies['api_key'],
-'llm_model': LLM_MODEL,
+'llm_model': cookies['llm_model'],
 'top_p':1.0,
 'max_length': None,
-'temperature':0.1,
+'temperature':0.4,
 }
 import random
 N_EACH_REQ = random.randint(16, 32)
@@ -352,9 +352,9 @@ def step_1_core_key_translate():
 chinese_core_keys_norepeat_mapping.update({k:cached_translation[k]})
 chinese_core_keys_norepeat_mapping = dict(sorted(chinese_core_keys_norepeat_mapping.items(), key=lambda x: -len(x[0])))

-# ===============================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 # copy
-# ===============================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 def copy_source_code():

 from toolbox import get_conf
@@ -367,9 +367,9 @@ def step_1_core_key_translate():
 shutil.copytree('./', backup_dir, ignore=lambda x, y: blacklist)
 copy_source_code()

-# ===============================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 # primary key replace
-# ===============================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 directory_path = f'./multi-language/{LANG}/'
 for root, dirs, files in os.walk(directory_path):
 for file in files:
@@ -389,9 +389,9 @@ def step_1_core_key_translate():

 def step_2_core_key_translate():

-# =================================================================================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 # step2
-# =================================================================================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

 def load_string(strings, string_input):
 string_ = string_input.strip().strip(',').strip().strip('.').strip()
@@ -492,9 +492,9 @@ def step_2_core_key_translate():
 cached_translation.update(read_map_from_json(language=LANG_STD))
 cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0])))

-# ===============================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 # literal key replace
-# ===============================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 directory_path = f'./multi-language/{LANG}/'
 for root, dirs, files in os.walk(directory_path):
 for file in files:
@@ -1,79 +1,35 @@
-# 如何使用其他大语言模型
+P.S. 如果您按照以下步骤成功接入了新的大模型,欢迎发Pull Requests(如果您在自己接入新模型的过程中遇到困难,欢迎加README底部QQ群联系群主)

-## ChatGLM

-- 安装依赖 `pip install -r request_llms/requirements_chatglm.txt`
-- 修改配置,在config.py中将LLM_MODEL的值改为"chatglm"

-``` sh
-LLM_MODEL = "chatglm"
-```
-- 运行!
-``` sh
-`python main.py`
-```

-## Claude-Stack

-- 请参考此教程获取 https://zhuanlan.zhihu.com/p/627485689
-- 1、SLACK_CLAUDE_BOT_ID
-- 2、SLACK_CLAUDE_USER_TOKEN

-- 把token加入config.py

-## Newbing

-- 使用cookie editor获取cookie(json)
-- 把cookie(json)加入config.py (NEWBING_COOKIES)

-## Moss
-- 使用docker-compose

-## RWKV
-- 使用docker-compose

-## LLAMA
-- 使用docker-compose

-## 盘古
-- 使用docker-compose


----
+# 如何接入其他本地大语言模型
-## Text-Generation-UI (TGUI,调试中,暂不可用)

-### 1. 部署TGUI
+1. 复制`request_llms/bridge_llama2.py`,重命名为你喜欢的名字
-``` sh
-# 1 下载模型
-git clone https://github.com/oobabooga/text-generation-webui.git
-# 2 这个仓库的最新代码有问题,回滚到几周之前
-git reset --hard fcda3f87767e642d1c0411776e549e1d3894843d
-# 3 切换路径
-cd text-generation-webui
-# 4 安装text-generation的额外依赖
-pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers
-# 5 下载模型
-python download-model.py facebook/galactica-1.3b
-# 其他可选如 facebook/opt-1.3b
-# facebook/galactica-1.3b
-# facebook/galactica-6.7b
-# facebook/galactica-120b
-# facebook/pygmalion-1.3b 等
-# 详情见 https://github.com/oobabooga/text-generation-webui

-# 6 启动text-generation
+2. 修改`load_model_and_tokenizer`方法,加载你的模型和分词器(去该模型官网找demo,复制粘贴即可)
-python server.py --cpu --listen --listen-port 7865 --model facebook_galactica-1.3b
-```

-### 2. 修改config.py
+3. 修改`llm_stream_generator`方法,定义推理模型(去该模型官网找demo,复制粘贴即可)

-``` sh
+4. 命令行测试
-# LLM_MODEL格式: tgui:[模型]@[ws地址]:[ws端口] , 端口要和上面给定的端口一致
+- 修改`tests/test_llms.py`(聪慧如您,只需要看一眼该文件就明白怎么修改了)
-LLM_MODEL = "tgui:galactica-1.3b@localhost:7860"
+- 运行`python tests/test_llms.py`
-```

-### 3. 运行!
+5. 测试通过后,在`request_llms/bridge_all.py`中做最后的修改,把你的模型完全接入到框架中(聪慧如您,只需要看一眼该文件就明白怎么修改了)
-``` sh
-cd chatgpt-academic
+6. 修改`LLM_MODEL`配置,然后运行`python main.py`,测试最后的效果
-python main.py
-```

+# 如何接入其他在线大语言模型

+1. 复制`request_llms/bridge_zhipu.py`,重命名为你喜欢的名字

+2. 修改`predict_no_ui_long_connection`

+3. 修改`predict`

+4. 命令行测试
+- 修改`tests/test_llms.py`(聪慧如您,只需要看一眼该文件就明白怎么修改了)
+- 运行`python tests/test_llms.py`

+5. 测试通过后,在`request_llms/bridge_all.py`中做最后的修改,把你的模型完全接入到框架中(聪慧如您,只需要看一眼该文件就明白怎么修改了)

+6. 修改`LLM_MODEL`配置,然后运行`python main.py`,测试最后的效果
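新版文档把在线模型的接入流程归结为"复制现有 bridge 模块、改写两个入口函数、再在 bridge_all.py 注册"。下面给出一个最小化的 bridge 模块草图,仅示意这两个入口需要满足的接口形状(签名与本页其他 bridge 文件一致);其中 `call_my_llm_api` 是假设的占位函数,真正接入时应替换为目标模型的 HTTP/SDK 调用:

```python
# request_llms/bridge_mymodel.py —— 按步骤1复制改名后的骨架示意(非项目自带文件)
from toolbox import update_ui

def call_my_llm_api(prompt, history):
    # 假设的占位函数: 以生成器形式逐段返回模型输出
    yield "(示例回复)"

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
                                  observe_window=None, console_slience=False):
    # 无UI入口: 阻塞式返回完整字符串, 供函数插件在多线程中调用
    result = ""
    for chunk in call_my_llm_api(inputs, history):
        result += chunk
        if observe_window: observe_window[0] = result  # 供看门狗观察输出进度
    return result

def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[],
            system_prompt='', stream=True, additional_fn=None):
    # 有UI入口: 以生成器方式把增量结果刷新到 chatbot
    chatbot.append((inputs, ""))
    buffer = ""
    for chunk in call_my_llm_api(inputs, history):
        buffer += chunk
        chatbot[-1] = (inputs, buffer)
        yield from update_ui(chatbot=chatbot, history=history)
    history.extend([inputs, buffer])
```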
@@ -11,7 +11,7 @@
 import tiktoken, copy
 from functools import lru_cache
 from concurrent.futures import ThreadPoolExecutor
-from toolbox import get_conf, trimmed_format_exc
+from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask

 from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
 from .bridge_chatgpt import predict as chatgpt_ui
@@ -28,6 +28,9 @@ from .bridge_chatglm3 import predict as chatglm3_ui
 from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
 from .bridge_qianfan import predict as qianfan_ui

+from .bridge_google_gemini import predict as genai_ui
+from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui

 colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']

 class LazyloadTiktoken(object):
@@ -246,6 +249,22 @@ model_info = {
 "tokenizer": tokenizer_gpt35,
 "token_cnt": get_token_num_gpt35,
 },
+"gemini-pro": {
+"fn_with_ui": genai_ui,
+"fn_without_ui": genai_noui,
+"endpoint": None,
+"max_token": 1024 * 32,
+"tokenizer": tokenizer_gpt35,
+"token_cnt": get_token_num_gpt35,
+},
+"gemini-pro-vision": {
+"fn_with_ui": genai_ui,
+"fn_without_ui": genai_noui,
+"endpoint": None,
+"max_token": 1024 * 32,
+"tokenizer": tokenizer_gpt35,
+"token_cnt": get_token_num_gpt35,
+},
 }

 # -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
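注册进 `model_info` 的每个条目都给出有/无 UI 两个推理入口、endpoint、token 上限与分词器;非 OpenAI 模型(如这里的 gemini-pro)也暂借 gpt-3.5 的 tokenizer 做近似计数。注册完成后,框架即可按模型名查表取出这些元数据,示意如下(输出值以上方 diff 为准):

```python
# 查表示意: 注册后即可按模型名取出元数据
from request_llms.bridge_all import model_info

info = model_info["gemini-pro"]
print(info["max_token"])                  # 32768, 即上表中的 1024 * 32
print(info["token_cnt"]("你好, Gemini"))  # 借用 gpt-3.5 分词器的近似 token 数
```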
@@ -431,14 +450,14 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS:
 })
 except:
 print(trimmed_format_exc())
-if "qwen" in AVAIL_LLM_MODELS:
+if "qwen-local" in AVAIL_LLM_MODELS:
 try:
-from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
+from .bridge_qwen_local import predict_no_ui_long_connection as qwen_local_noui
-from .bridge_qwen import predict as qwen_ui
+from .bridge_qwen_local import predict as qwen_local_ui
 model_info.update({
-"qwen": {
+"qwen-local": {
-"fn_with_ui": qwen_ui,
+"fn_with_ui": qwen_local_ui,
-"fn_without_ui": qwen_noui,
+"fn_without_ui": qwen_local_noui,
 "endpoint": None,
 "max_token": 4096,
 "tokenizer": tokenizer_gpt35,
@@ -447,16 +466,32 @@ if "qwen" in AVAIL_LLM_MODELS:
 })
 except:
 print(trimmed_format_exc())
-if "chatgpt_website" in AVAIL_LLM_MODELS: # 接入一些逆向工程https://github.com/acheong08/ChatGPT-to-API/
+if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
 try:
-from .bridge_chatgpt_website import predict_no_ui_long_connection as chatgpt_website_noui
+from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
-from .bridge_chatgpt_website import predict as chatgpt_website_ui
+from .bridge_qwen import predict as qwen_ui
 model_info.update({
-"chatgpt_website": {
+"qwen-turbo": {
-"fn_with_ui": chatgpt_website_ui,
+"fn_with_ui": qwen_ui,
-"fn_without_ui": chatgpt_website_noui,
+"fn_without_ui": qwen_noui,
-"endpoint": openai_endpoint,
+"endpoint": None,
-"max_token": 4096,
+"max_token": 6144,
+"tokenizer": tokenizer_gpt35,
+"token_cnt": get_token_num_gpt35,
+},
+"qwen-plus": {
+"fn_with_ui": qwen_ui,
+"fn_without_ui": qwen_noui,
+"endpoint": None,
+"max_token": 30720,
+"tokenizer": tokenizer_gpt35,
+"token_cnt": get_token_num_gpt35,
+},
+"qwen-max": {
+"fn_with_ui": qwen_ui,
+"fn_without_ui": qwen_noui,
+"endpoint": None,
+"max_token": 28672,
 "tokenizer": tokenizer_gpt35,
 "token_cnt": get_token_num_gpt35,
 }
@@ -543,6 +578,39 @@ if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai
 })
 except:
 print(trimmed_format_exc())
+if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
+try:
+from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui
+from .bridge_deepseekcoder import predict as deepseekcoder_ui
+model_info.update({
+"deepseekcoder": {
+"fn_with_ui": deepseekcoder_ui,
+"fn_without_ui": deepseekcoder_noui,
+"endpoint": None,
+"max_token": 2048,
+"tokenizer": tokenizer_gpt35,
+"token_cnt": get_token_num_gpt35,
+}
+})
+except:
+print(trimmed_format_exc())
+# if "skylark" in AVAIL_LLM_MODELS:
+# try:
+# from .bridge_skylark2 import predict_no_ui_long_connection as skylark_noui
+# from .bridge_skylark2 import predict as skylark_ui
+# model_info.update({
+# "skylark": {
+# "fn_with_ui": skylark_ui,
+# "fn_without_ui": skylark_noui,
+# "endpoint": None,
+# "max_token": 4096,
+# "tokenizer": tokenizer_gpt35,
+# "token_cnt": get_token_num_gpt35,
+# }
+# })
+# except:
+# print(trimmed_format_exc())


 # <-- 用于定义和切换多个azure模型 -->
 AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
@@ -600,6 +668,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
 """
 import threading, time, copy

+inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
 model = llm_kwargs['llm_model']
 n_model = 1
 if '&' not in model:
@@ -673,6 +742,7 @@ def predict(inputs, llm_kwargs, *args, **kwargs):
 additional_fn代表点击的哪个按钮,按钮见functional.py
 """

+inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
 method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错,检查config中的AVAIL_LLM_MODELS选项
 yield from method(inputs, llm_kwargs, *args, **kwargs)

@@ -51,7 +51,8 @@ def decode_chunk(chunk):
 chunkjson = json.loads(chunk_decoded[6:])
 has_choices = 'choices' in chunkjson
 if has_choices: choice_valid = (len(chunkjson['choices']) > 0)
-if has_choices and choice_valid: has_content = "content" in chunkjson['choices'][0]["delta"]
+if has_choices and choice_valid: has_content = ("content" in chunkjson['choices'][0]["delta"])
+if has_content: has_content = (chunkjson['choices'][0]["delta"]["content"] is not None)
 except:
 pass
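此处给 `decode_chunk` 增加了一重校验:即使 delta 里带有 content 键,其值也可能是 None(一些第三方网关会发出这种空心跳块),此时不应视为有效内容。下面用构造的 SSE 数据块演示这一判断(`inspect` 为演示用的假设函数名):

```python
import json

normal = b'data: {"choices":[{"delta":{"content":"Hi"}}]}'   # 正常增量块
hollow = b'data: {"choices":[{"delta":{"content":null}}]}'   # content 为 null 的心跳块

def inspect(chunk):
    chunk_decoded = chunk.decode()
    chunkjson = json.loads(chunk_decoded[6:])   # 去掉前缀 "data: "
    delta = chunkjson['choices'][0]['delta']
    has_content = ("content" in delta)
    if has_content: has_content = (delta["content"] is not None)  # 即新增的第二行判断
    return has_content

print(inspect(normal))  # True  -> 追加进回复缓冲区
print(inspect(hollow))  # False -> 跳过该块
```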
@@ -101,20 +102,25 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
 result = ''
 json_data = None
 while True:
-try: chunk = next(stream_response).decode()
+try: chunk = next(stream_response)
 except StopIteration:
 break
 except requests.exceptions.ConnectionError:
-chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。
+chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
-if len(chunk)==0: continue
-if not chunk.startswith('data:'):
-error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
+chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
+if len(chunk_decoded)==0: continue
+if not chunk_decoded.startswith('data:'):
+error_msg = get_full_error(chunk, stream_response).decode()
 if "reduce the length" in error_msg:
 raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
 else:
 raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
-if ('data: [DONE]' in chunk): break # api2d 正常完成
+if ('data: [DONE]' in chunk_decoded): break # api2d 正常完成
-json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
+# 提前读取一些信息 (用于判断异常)
+if has_choices and not choice_valid:
+# 一些垃圾第三方接口的出现这样的错误
+continue
+json_data = chunkjson['choices'][0]
 delta = json_data["delta"]
 if len(delta) == 0: break
 if "role" in delta: continue
@@ -238,6 +244,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 if has_choices and not choice_valid:
 # 一些垃圾第三方接口的出现这样的错误
 continue
+if ('data: [DONE]' not in chunk_decoded) and len(chunk_decoded) > 0 and (chunkjson is None):
+# 传递进来一些奇怪的东西
+raise ValueError(f'无法读取以下数据,请检查配置。\n\n{chunk_decoded}')
 # 前者是API2D的结束条件,后者是OPENAI的结束条件
 if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
 # 判定为数据流的结束,gpt_replying_buffer也写完了
@@ -15,29 +15,16 @@ import requests
 import base64
 import os
 import glob
+from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, \
+update_ui_lastest_msg, get_max_token, encode_image, have_any_recent_upload_image_files


-from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, update_ui_lastest_msg, get_max_token
 proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
 get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')

 timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
 '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'

-def have_any_recent_upload_image_files(chatbot):
-_5min = 5 * 60
-if chatbot is None: return False, None # chatbot is None
-most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
-if not most_recent_uploaded: return False, None # most_recent_uploaded is None
-if time.time() - most_recent_uploaded["time"] < _5min:
-most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
-path = most_recent_uploaded['path']
-file_manifest = [f for f in glob.glob(f'{path}/**/*.jpg', recursive=True)]
-file_manifest += [f for f in glob.glob(f'{path}/**/*.jpeg', recursive=True)]
-file_manifest += [f for f in glob.glob(f'{path}/**/*.png', recursive=True)]
-if len(file_manifest) == 0: return False, None
-return True, file_manifest # most_recent_uploaded is new
-else:
-return False, None # most_recent_uploaded is too old

 def report_invalid_key(key):
 if get_conf("BLOCK_INVALID_APIKEY"):
@@ -258,10 +245,6 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg,
 chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
 return chatbot, history

-# Function to encode the image
-def encode_image(image_path):
-with open(image_path, "rb") as image_file:
-return base64.b64encode(image_file.read()).decode('utf-8')

 def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths):
 """

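被删除的 `encode_image`(与 `have_any_recent_upload_image_files` 一起挪进了 toolbox 统一维护)只做一件事:把本地图片读成 base64 文本,以便内联进 vision 模型的 JSON 请求体。独立示意如下(示例路径为假设):

```python
import base64

def encode_image(image_path):
    # 读取图片二进制并编码为 base64 文本
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

# 用法示意: 编码结果可拼成 data URL 填入请求体的 image_url 字段
# b64 = encode_image('docs/demo.png')
# image_url = f"data:image/png;base64,{b64}"
```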
129 request_llms/bridge_deepseekcoder.py 普通文件
@@ -0,0 +1,129 @@
+model_name = "deepseek-coder-6.7b-instruct"
+cmd_to_install = "未知" # "`pip install -r request_llms/requirements_qwen.txt`"
+
+import os
+from toolbox import ProxyNetworkActivate
+from toolbox import get_conf
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
+from threading import Thread
+import torch
+
+def download_huggingface_model(model_name, max_retry, local_dir):
+    from huggingface_hub import snapshot_download
+    for i in range(1, max_retry):
+        try:
+            snapshot_download(repo_id=model_name, local_dir=local_dir, resume_download=True)
+            break
+        except Exception as e:
+            print(f'\n\n下载失败,重试第{i}次中...\n\n')
+    return local_dir
+# ------------------------------------------------------------------------------------------------------------------------
+# 🔌💻 Local Model
+# ------------------------------------------------------------------------------------------------------------------------
+class GetCoderLMHandle(LocalLLMHandle):
+
+    def load_model_info(self):
+        # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
+        self.model_name = model_name
+        self.cmd_to_install = cmd_to_install
+
+    def load_model_and_tokenizer(self):
+        # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
+        with ProxyNetworkActivate('Download_LLM'):
+            from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
+            model_name = "deepseek-ai/deepseek-coder-6.7b-instruct"
+            # local_dir = f"~/.cache/{model_name}"
+            # if not os.path.exists(local_dir):
+            #     tokenizer = download_huggingface_model(model_name, max_retry=128, local_dir=local_dir)
+            tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+            self._streamer = TextIteratorStreamer(tokenizer)
+            device_map = {
+                "transformer.word_embeddings": 0,
+                "transformer.word_embeddings_layernorm": 0,
+                "lm_head": 0,
+                "transformer.h": 0,
+                "transformer.ln_f": 0,
+                "model.embed_tokens": 0,
+                "model.layers": 0,
+                "model.norm": 0,
+            }
+
+            # 检查量化配置
+            quantization_type = get_conf('LOCAL_MODEL_QUANT')
+
+            if get_conf('LOCAL_MODEL_DEVICE') != 'cpu':
+                if quantization_type == "INT8":
+                    from transformers import BitsAndBytesConfig
+                    # 使用 INT8 量化
+                    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, load_in_8bit=True,
+                                                                 device_map=device_map)
+                elif quantization_type == "INT4":
+                    from transformers import BitsAndBytesConfig
+                    # 使用 INT4 量化
+                    bnb_config = BitsAndBytesConfig(
+                        load_in_4bit=True,
+                        bnb_4bit_use_double_quant=True,
+                        bnb_4bit_quant_type="nf4",
+                        bnb_4bit_compute_dtype=torch.bfloat16
+                    )
+                    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
+                                                                 quantization_config=bnb_config, device_map=device_map)
+                else:
+                    # 使用默认的 FP16
+                    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
+                                                                 torch_dtype=torch.bfloat16, device_map=device_map)
+            else:
+                # CPU 模式
+                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
+                                                             torch_dtype=torch.bfloat16)
+
+        return model, tokenizer
+
+    def llm_stream_generator(self, **kwargs):
+        # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
+        def adaptor(kwargs):
+            query = kwargs['query']
+            max_length = kwargs['max_length']
+            top_p = kwargs['top_p']
+            temperature = kwargs['temperature']
+            history = kwargs['history']
+            return query, max_length, top_p, temperature, history
+
+        query, max_length, top_p, temperature, history = adaptor(kwargs)
+        history.append({ 'role': 'user', 'content': query})
+        messages = history
+        inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt")
+        if inputs.shape[1] > max_length:
+            inputs = inputs[:, -max_length:]
+        inputs = inputs.to(self._model.device)
+        generation_kwargs = dict(
+            inputs=inputs,
+            max_new_tokens=max_length,
+            do_sample=False,
+            top_p=top_p,
+            streamer = self._streamer,
+            top_k=50,
+            temperature=temperature,
+            num_return_sequences=1,
+            eos_token_id=32021,
+        )
+        thread = Thread(target=self._model.generate, kwargs=generation_kwargs, daemon=True)
+        thread.start()
+        generated_text = ""
+        for new_text in self._streamer:
+            generated_text += new_text
+            # print(generated_text)
+            yield generated_text
+
+    def try_to_import_special_deps(self, **kwargs): pass
+        # import something that will raise error if the user does not install requirement_*.txt
+        # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行
+        # import importlib
+        # importlib.import_module('modelscope')
+
+
+# ------------------------------------------------------------------------------------------------------------------------
+# 🔌💻 GPT-Academic Interface
+# ------------------------------------------------------------------------------------------------------------------------
+predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetCoderLMHandle, model_name, history_format='chatglm3')
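这个新文件借助 `LocalLLMHandle` / `get_local_llm_predict_fns`,把在子进程中加载的本地模型包装成与在线模型同形的两个入口。下面是一个假设性的最小调用示意(参数取值仅作演示;首次调用会在子进程中下载并加载模型,需要相应的显存或内存):

```python
# 假设性调用示意, 非项目自带脚本
from request_llms.bridge_deepseekcoder import predict_no_ui_long_connection

llm_kwargs = {
    'max_length': 512,   # 对应 llm_stream_generator 中的 max_new_tokens
    'top_p': 1.0,
    'temperature': 0.4,
}
reply = predict_no_ui_long_connection(
    inputs="用Python写一个快速排序",
    llm_kwargs=llm_kwargs,
    history=[],
    sys_prompt="",
    observe_window=[])
print(reply)
```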
114 request_llms/bridge_google_gemini.py 普通文件
@@ -0,0 +1,114 @@
+# encoding: utf-8
+# @Time : 2023/12/21
+# @Author : Spike
+# @Descr :
+import json
+import re
+import os
+import time
+from request_llms.com_google import GoogleChatInit
+from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc
+
+proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
+timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
+                  '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
+
+
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
+                                  console_slience=False):
+    # 检查API_KEY
+    if get_conf("GEMINI_API_KEY") == "":
+        raise ValueError(f"请配置 GEMINI_API_KEY。")
+
+    genai = GoogleChatInit()
+    watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
+    gpt_replying_buffer = ''
+    stream_response = genai.generate_chat(inputs, llm_kwargs, history, sys_prompt)
+    for response in stream_response:
+        results = response.decode()
+        match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
+        error_match = re.search(r'\"message\":\s*\"(.*?)\"', results, flags=re.DOTALL)
+        if match:
+            try:
+                paraphrase = json.loads('{"text": "%s"}' % match.group(1))
+            except:
+                raise ValueError(f"解析GEMINI消息出错。")
+            buffer = paraphrase['text']
+            gpt_replying_buffer += buffer
+            if len(observe_window) >= 1:
+                observe_window[0] = gpt_replying_buffer
+            if len(observe_window) >= 2:
+                if (time.time() - observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
+        if error_match:
+            raise RuntimeError(f'{gpt_replying_buffer} 对话错误')
+    return gpt_replying_buffer
+
+
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+    # 检查API_KEY
+    if get_conf("GEMINI_API_KEY") == "":
+        yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
+        return
+
+    # 适配润色区域
+    if additional_fn is not None:
+        from core_functional import handle_core_functionality
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
+
+    if "vision" in llm_kwargs["llm_model"]:
+        have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
+        def make_media_input(inputs, image_paths):
+            for image_path in image_paths:
+                inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
+            return inputs
+        if have_recent_file:
+            inputs = make_media_input(inputs, image_paths)
+
+    chatbot.append((inputs, ""))
+    yield from update_ui(chatbot=chatbot, history=history)
+    genai = GoogleChatInit()
+    retry = 0
+    while True:
+        try:
+            stream_response = genai.generate_chat(inputs, llm_kwargs, history, system_prompt)
+            break
+        except Exception as e:
+            retry += 1
+            chatbot[-1] = ((chatbot[-1][0], trimmed_format_exc()))
+            yield from update_ui(chatbot=chatbot, history=history, msg="请求失败") # 刷新界面
+            return
+    gpt_replying_buffer = ""
+    gpt_security_policy = ""
+    history.extend([inputs, ''])
+    for response in stream_response:
+        results = response.decode("utf-8") # 被这个解码给耍了。。
+        gpt_security_policy += results
+        match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
+        error_match = re.search(r'\"message\":\s*\"(.*)\"', results, flags=re.DOTALL)
+        if match:
+            try:
+                paraphrase = json.loads('{"text": "%s"}' % match.group(1))
+            except:
+                raise ValueError(f"解析GEMINI消息出错。")
+            gpt_replying_buffer += paraphrase['text'] # 使用 json 解析库进行处理
+            chatbot[-1] = (inputs, gpt_replying_buffer)
+            history[-1] = gpt_replying_buffer
+            yield from update_ui(chatbot=chatbot, history=history)
+        if error_match:
+            history = history[-2] # 错误的不纳入对话
+            chatbot[-1] = (inputs, gpt_replying_buffer + f"对话错误,请查看message\n\n```\n{error_match.group(1)}\n```")
+            yield from update_ui(chatbot=chatbot, history=history)
+            raise RuntimeError('对话错误')
+    if not gpt_replying_buffer:
+        history = history[-2] # 错误的不纳入对话
+        chatbot[-1] = (inputs, gpt_replying_buffer + f"触发了Google的安全访问策略,没有回答\n\n```\n{gpt_security_policy}\n```")
+        yield from update_ui(chatbot=chatbot, history=history)
+
+
+
+if __name__ == '__main__':
+    import sys
+    llm_kwargs = {'llm_model': 'gemini-pro'}
+    result = predict('Write long a story about a magic backpack.', llm_kwargs, llm_kwargs, [])
+    for i in result:
+        print(i)
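bridge_google_gemini.py 不整体解析流式返回的 JSON,而是用正则抠出其中的 "text" 字段,再把捕获串重新包成 JSON 交给 json.loads 还原 \n、\" 等转义。下面用一段构造的返回片段演示该正则的行为(片段内容为演示用假设):

```python
import json, re

results = '{"candidates": [{"content": {"parts": [{"text": "你好\\n世界"}]}}]}'

match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
if match:
    # 捕获组保留了原始转义, 重新包成 JSON 后由 json.loads 统一解码
    paraphrase = json.loads('{"text": "%s"}' % match.group(1))
    print(paraphrase['text'])   # 打印"你好"与"世界", 中间是真实换行
```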
@@ -12,7 +12,7 @@ from threading import Thread
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-class GetONNXGLMHandle(LocalLLMHandle):
+class GetLlamaHandle(LocalLLMHandle):

 def load_model_info(self):
 # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
@@ -87,4 +87,4 @@ class GetONNXGLMHandle(LocalLLMHandle):
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 GPT-Academic Interface
 # ------------------------------------------------------------------------------------------------------------------------
-predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name)
+predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetLlamaHandle, model_name)
@@ -1,16 +1,17 @@
 """
-========================================================================
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 第一部分:来自EdgeGPT.py
 https://github.com/acheong08/EdgeGPT
-========================================================================
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 """
 from .edge_gpt_free import Chatbot as NewbingChatbot

 load_message = "等待NewBing响应。"

 """
-========================================================================
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 第二部分:子进程Worker(调用主体)
-========================================================================
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 """
 import time
 import json
@@ -22,19 +23,30 @@ import threading
 from toolbox import update_ui, get_conf, trimmed_format_exc
 from multiprocessing import Process, Pipe


 def preprocess_newbing_out(s):
-pattern = r'\^(\d+)\^' # 匹配^数字^
+pattern = r"\^(\d+)\^" # 匹配^数字^
-sub = lambda m: '('+m.group(1)+')' # 将匹配到的数字作为替换值
+sub = lambda m: "(" + m.group(1) + ")" # 将匹配到的数字作为替换值
 result = re.sub(pattern, sub, s) # 替换操作
-if '[1]' in result:
+if "[1]" in result:
-result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
+result += (
+"\n\n```reference\n"
++ "\n".join([r for r in result.split("\n") if r.startswith("[")])
++ "\n```\n"
+)
 return result


 def preprocess_newbing_out_simple(result):
-if '[1]' in result:
+if "[1]" in result:
-result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
+result += (
+"\n\n```reference\n"
++ "\n".join([r for r in result.split("\n") if r.startswith("[")])
++ "\n```\n"
+)
 return result


 class NewBingHandle(Process):
 def __init__(self):
 super().__init__(daemon=True)
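这一段只是把 `preprocess_newbing_out` 等函数统一为双引号风格并拆行,逻辑不变:先把 NewBing 输出中的 ^数字^ 角标替换为 (数字),再把以 [ 开头的引用行归拢到引用代码块里。核心替换的行为示意:

```python
import re

pattern = r"\^(\d+)\^"                   # 匹配 ^数字^ 形式的角标
sub = lambda m: "(" + m.group(1) + ")"   # 替换为 (数字)
print(re.sub(pattern, sub, "答案见文献^1^与^2^。"))
# 输出: 答案见文献(1)与(2)。
```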
@@ -51,6 +63,7 @@ class NewBingHandle(Process):
|
|||||||
try:
|
try:
|
||||||
self.success = False
|
self.success = False
|
||||||
import certifi, httpx, rich
|
import certifi, httpx, rich
|
||||||
|
|
||||||
self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。"
|
self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。"
|
||||||
self.success = True
|
self.success = True
|
||||||
except:
|
except:
|
||||||
@@ -62,18 +75,19 @@ class NewBingHandle(Process):
|
|||||||
|
|
||||||
async def async_run(self):
|
async def async_run(self):
|
||||||
# 读取配置
|
# 读取配置
|
||||||
NEWBING_STYLE = get_conf('NEWBING_STYLE')
|
NEWBING_STYLE = get_conf("NEWBING_STYLE")
|
||||||
from request_llms.bridge_all import model_info
|
from request_llms.bridge_all import model_info
|
||||||
endpoint = model_info['newbing']['endpoint']
|
|
||||||
|
endpoint = model_info["newbing"]["endpoint"]
|
||||||
while True:
|
while True:
|
||||||
# 等待
|
# 等待
|
||||||
kwargs = self.child.recv()
|
kwargs = self.child.recv()
|
||||||
question=kwargs['query']
|
question = kwargs["query"]
|
||||||
history=kwargs['history']
|
history = kwargs["history"]
|
||||||
system_prompt=kwargs['system_prompt']
|
system_prompt = kwargs["system_prompt"]
|
||||||
|
|
||||||
# 是否重置
|
# 是否重置
|
||||||
if len(self.local_history) > 0 and len(history)==0:
|
if len(self.local_history) > 0 and len(history) == 0:
|
||||||
await self.newbing_model.reset()
|
await self.newbing_model.reset()
|
||||||
self.local_history = []
|
self.local_history = []
|
||||||
|
|
||||||
@@ -81,34 +95,33 @@ class NewBingHandle(Process):
|
|||||||
prompt = ""
|
prompt = ""
|
||||||
if system_prompt not in self.local_history:
|
if system_prompt not in self.local_history:
|
||||||
self.local_history.append(system_prompt)
|
self.local_history.append(system_prompt)
|
||||||
prompt += system_prompt + '\n'
|
prompt += system_prompt + "\n"
|
||||||
|
|
||||||
# 追加历史
|
# 追加历史
|
||||||
for ab in history:
|
for ab in history:
|
||||||
a, b = ab
|
a, b = ab
|
||||||
if a not in self.local_history:
|
if a not in self.local_history:
|
||||||
self.local_history.append(a)
|
self.local_history.append(a)
|
||||||
prompt += a + '\n'
|
prompt += a + "\n"
|
||||||
|
|
||||||
# 问题
|
# 问题
|
||||||
prompt += question
|
prompt += question
|
||||||
self.local_history.append(question)
|
self.local_history.append(question)
|
||||||
print('question:', prompt)
|
print("question:", prompt)
|
||||||
# 提交
|
# 提交
|
||||||
async for final, response in self.newbing_model.ask_stream(
|
async for final, response in self.newbing_model.ask_stream(
|
||||||
prompt=question,
|
prompt=question,
|
||||||
conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
|
conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
|
||||||
wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
|
wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
|
||||||
):
|
):
|
||||||
if not final:
|
if not final:
|
||||||
print(response)
|
print(response)
|
||||||
self.child.send(str(response))
|
self.child.send(str(response))
|
||||||
else:
|
else:
|
||||||
print('-------- receive final ---------')
|
print("-------- receive final ---------")
|
||||||
self.child.send('[Finish]')
|
self.child.send("[Finish]")
|
||||||
# self.local_history.append(response)
|
# self.local_history.append(response)
|
||||||
|
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
"""
|
"""
|
||||||
这个函数运行在子进程
|
这个函数运行在子进程
|
||||||
@@ -118,32 +131,37 @@ class NewBingHandle(Process):
|
|||||||
self.local_history = []
|
self.local_history = []
|
||||||
if (self.newbing_model is None) or (not self.success):
|
if (self.newbing_model is None) or (not self.success):
|
||||||
# 代理设置
|
# 代理设置
|
||||||
proxies, NEWBING_COOKIES = get_conf('proxies', 'NEWBING_COOKIES')
|
proxies, NEWBING_COOKIES = get_conf("proxies", "NEWBING_COOKIES")
|
||||||
if proxies is None:
|
if proxies is None:
|
||||||
self.proxies_https = None
|
self.proxies_https = None
|
||||||
else:
|
else:
|
||||||
self.proxies_https = proxies['https']
|
self.proxies_https = proxies["https"]
|
||||||
|
|
||||||
if (NEWBING_COOKIES is not None) and len(NEWBING_COOKIES) > 100:
|
if (NEWBING_COOKIES is not None) and len(NEWBING_COOKIES) > 100:
|
||||||
try:
|
try:
|
||||||
cookies = json.loads(NEWBING_COOKIES)
|
cookies = json.loads(NEWBING_COOKIES)
|
||||||
except:
|
except:
|
||||||
self.success = False
|
self.success = False
|
||||||
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
|
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
|
||||||
self.child.send(f'[Local Message] NEWBING_COOKIES未填写或有格式错误。')
|
self.child.send(f"[Local Message] NEWBING_COOKIES未填写或有格式错误。")
|
||||||
self.child.send('[Fail]'); self.child.send('[Finish]')
|
self.child.send("[Fail]")
|
||||||
|
self.child.send("[Finish]")
|
||||||
raise RuntimeError(f"NEWBING_COOKIES未填写或有格式错误。")
|
raise RuntimeError(f"NEWBING_COOKIES未填写或有格式错误。")
|
||||||
else:
|
else:
|
||||||
cookies = None
|
cookies = None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.newbing_model = NewbingChatbot(proxy=self.proxies_https, cookies=cookies)
|
self.newbing_model = NewbingChatbot(
|
||||||
|
proxy=self.proxies_https, cookies=cookies
|
||||||
|
)
|
||||||
except:
|
except:
|
||||||
self.success = False
|
self.success = False
|
||||||
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
|
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
|
||||||
self.child.send(f'[Local Message] 不能加载Newbing组件,请注意Newbing组件已不再维护。{tb_str}')
|
self.child.send(
|
||||||
self.child.send('[Fail]')
|
f"[Local Message] 不能加载Newbing组件,请注意Newbing组件已不再维护。{tb_str}"
|
||||||
self.child.send('[Finish]')
|
)
|
||||||
|
self.child.send("[Fail]")
|
||||||
|
self.child.send("[Finish]")
|
||||||
raise RuntimeError(f"不能加载Newbing组件,请注意Newbing组件已不再维护。")
|
raise RuntimeError(f"不能加载Newbing组件,请注意Newbing组件已不再维护。")
|
||||||
|
|
||||||
self.success = True
|
self.success = True
|
||||||
@@ -151,42 +169,57 @@ class NewBingHandle(Process):
|
|||||||
# 进入任务等待状态
|
# 进入任务等待状态
|
||||||
asyncio.run(self.async_run())
|
asyncio.run(self.async_run())
|
||||||
except Exception:
|
except Exception:
|
||||||
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
|
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
|
||||||
self.child.send(f'[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题,建议更换代理协议(推荐http)或代理节点 {tb_str}.')
|
self.child.send(
|
||||||
self.child.send('[Fail]')
|
f"[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题,建议更换代理协议(推荐http)或代理节点 {tb_str}."
|
||||||
self.child.send('[Finish]')
|
)
|
||||||
|
self.child.send("[Fail]")
|
||||||
|
self.child.send("[Finish]")
|
||||||
|
|
||||||
def stream_chat(self, **kwargs):
|
def stream_chat(self, **kwargs):
|
||||||
"""
|
"""
|
||||||
这个函数运行在主进程
|
这个函数运行在主进程
|
||||||
"""
|
"""
|
||||||
self.threadLock.acquire() # 获取线程锁
|
self.threadLock.acquire() # 获取线程锁
|
||||||
self.parent.send(kwargs) # 请求子进程
|
self.parent.send(kwargs) # 请求子进程
|
||||||
while True:
|
while True:
|
||||||
res = self.parent.recv() # 等待newbing回复的片段
|
res = self.parent.recv() # 等待newbing回复的片段
|
||||||
if res == '[Finish]': break # 结束
|
if res == "[Finish]":
|
||||||
elif res == '[Fail]': self.success = False; break # 失败
|
break # 结束
|
||||||
else: yield res # newbing回复的片段
|
elif res == "[Fail]":
|
||||||
self.threadLock.release() # 释放线程锁
|
self.success = False
|
||||||
|
break # 失败
|
||||||
|
else:
|
||||||
|
yield res # newbing回复的片段
|
||||||
|
self.threadLock.release() # 释放线程锁
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
========================================================================
|
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
||||||
第三部分:主进程统一调用函数接口
|
第三部分:主进程统一调用函数接口
|
||||||
========================================================================
|
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
||||||
"""
|
"""
|
||||||
global newbingfree_handle
|
global newbingfree_handle
|
||||||
newbingfree_handle = None
|
newbingfree_handle = None
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
|
||||||
|
def predict_no_ui_long_connection(
|
||||||
|
inputs,
|
||||||
|
llm_kwargs,
|
||||||
|
history=[],
|
||||||
|
sys_prompt="",
|
||||||
|
observe_window=[],
|
||||||
|
console_slience=False,
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
多线程方法
|
多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
"""
|
"""
|
||||||
global newbingfree_handle
|
global newbingfree_handle
|
||||||
if (newbingfree_handle is None) or (not newbingfree_handle.success):
|
if (newbingfree_handle is None) or (not newbingfree_handle.success):
|
||||||
newbingfree_handle = NewBingHandle()
|
newbingfree_handle = NewBingHandle()
|
||||||
if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
|
if len(observe_window) >= 1:
|
||||||
|
observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
|
||||||
if not newbingfree_handle.success:
|
if not newbingfree_handle.success:
|
||||||
error = newbingfree_handle.info
|
error = newbingfree_handle.info
|
||||||
newbingfree_handle = None
|
newbingfree_handle = None
|
||||||
@@ -194,23 +227,42 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
|
|
||||||
# 没有 sys_prompt 接口,因此把prompt加入 history
|
# 没有 sys_prompt 接口,因此把prompt加入 history
|
||||||
history_feedin = []
|
history_feedin = []
|
||||||
for i in range(len(history)//2):
|
for i in range(len(history) // 2):
|
||||||
history_feedin.append([history[2*i], history[2*i+1]] )
|
history_feedin.append([history[2 * i], history[2 * i + 1]])
|
||||||
|
|
||||||
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
|
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
|
||||||
response = ""
|
response = ""
|
||||||
if len(observe_window) >= 1: observe_window[0] = "[Local Message] 等待NewBing响应中 ..."
|
if len(observe_window) >= 1:
|
||||||
for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
|
observe_window[0] = "[Local Message] 等待NewBing响应中 ..."
|
||||||
if len(observe_window) >= 1: observe_window[0] = preprocess_newbing_out_simple(response)
|
for response in newbingfree_handle.stream_chat(
|
||||||
|
query=inputs,
|
||||||
|
history=history_feedin,
|
||||||
|
system_prompt=sys_prompt,
|
||||||
|
max_length=llm_kwargs["max_length"],
|
||||||
|
top_p=llm_kwargs["top_p"],
|
||||||
|
temperature=llm_kwargs["temperature"],
|
||||||
|
):
|
||||||
|
if len(observe_window) >= 1:
|
||||||
|
observe_window[0] = preprocess_newbing_out_simple(response)
|
||||||
if len(observe_window) >= 2:
|
if len(observe_window) >= 2:
|
||||||
if (time.time()-observe_window[1]) > watch_dog_patience:
|
if (time.time() - observe_window[1]) > watch_dog_patience:
|
||||||
raise RuntimeError("程序终止。")
|
raise RuntimeError("程序终止。")
|
||||||
return preprocess_newbing_out_simple(response)
|
return preprocess_newbing_out_simple(response)
|
||||||
|
|
||||||
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
|
||||||
|
def predict(
|
||||||
|
inputs,
|
||||||
|
llm_kwargs,
|
||||||
|
plugin_kwargs,
|
||||||
|
chatbot,
|
||||||
|
history=[],
|
||||||
|
system_prompt="",
|
||||||
|
stream=True,
|
||||||
|
additional_fn=None,
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
单线程方法
|
单线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
"""
|
"""
|
||||||
chatbot.append((inputs, "[Local Message] 等待NewBing响应中 ..."))
|
chatbot.append((inputs, "[Local Message] 等待NewBing响应中 ..."))
|
||||||
|
|
||||||
@@ -225,21 +277,35 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
|
|
||||||
if additional_fn is not None:
|
if additional_fn is not None:
|
||||||
from core_functional import handle_core_functionality
|
from core_functional import handle_core_functionality
|
||||||
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
|
||||||
|
inputs, history = handle_core_functionality(
|
||||||
|
additional_fn, inputs, history, chatbot
|
||||||
|
)
|
||||||
|
|
||||||
history_feedin = []
|
history_feedin = []
|
||||||
for i in range(len(history)//2):
|
for i in range(len(history) // 2):
|
||||||
history_feedin.append([history[2*i], history[2*i+1]] )
|
history_feedin.append([history[2 * i], history[2 * i + 1]])
|
||||||
|
|
||||||
chatbot[-1] = (inputs, "[Local Message] 等待NewBing响应中 ...")
|
chatbot[-1] = (inputs, "[Local Message] 等待NewBing响应中 ...")
|
||||||
response = "[Local Message] 等待NewBing响应中 ..."
|
response = "[Local Message] 等待NewBing响应中 ..."
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
|
yield from update_ui(
|
||||||
for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
|
chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
|
||||||
|
)
|
||||||
|
for response in newbingfree_handle.stream_chat(
|
||||||
|
query=inputs,
|
||||||
|
history=history_feedin,
|
||||||
|
system_prompt=system_prompt,
|
||||||
|
max_length=llm_kwargs["max_length"],
|
||||||
|
top_p=llm_kwargs["top_p"],
|
||||||
|
temperature=llm_kwargs["temperature"],
|
||||||
|
):
|
||||||
chatbot[-1] = (inputs, preprocess_newbing_out(response))
|
chatbot[-1] = (inputs, preprocess_newbing_out(response))
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
|
yield from update_ui(
|
||||||
if response == "[Local Message] 等待NewBing响应中 ...": response = "[Local Message] NewBing响应异常,请刷新界面重试 ..."
|
chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
|
||||||
|
)
|
||||||
|
if response == "[Local Message] 等待NewBing响应中 ...":
|
||||||
|
response = "[Local Message] NewBing响应异常,请刷新界面重试 ..."
|
||||||
history.extend([inputs, response])
|
history.extend([inputs, response])
|
||||||
logging.info(f'[raw_input] {inputs}')
|
logging.info(f"[raw_input] {inputs}")
|
||||||
logging.info(f'[response] {response}')
|
logging.info(f"[response] {response}")
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
|
yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
|
||||||
|
|
||||||
|
|||||||
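The hunks above reformat the NewBing bridge without changing its inter-process contract: a daemon subprocess streams growing reply fragments through a `multiprocessing.Pipe` and terminates the stream with a `"[Finish]"` (or `"[Fail]"`) sentinel, while the main process drains the pipe in a generator. A self-contained sketch of just that protocol, with hypothetical `worker`/`stream_chat` names:

```python
import time
from multiprocessing import Pipe, Process


def worker(conn):
    # Child process: send successively longer fragments, then the sentinel.
    for fragment in ["Hello", "Hello, wor", "Hello, world"]:
        conn.send(fragment)
        time.sleep(0.1)
    conn.send("[Finish]")


def stream_chat(parent_conn):
    # Main process: yield fragments until a sentinel arrives.
    while True:
        res = parent_conn.recv()
        if res == "[Finish]":
            break      # normal end of stream
        elif res == "[Fail]":
            break      # the worker reported an error
        else:
            yield res  # newest version of the partial reply


if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = Process(target=worker, args=(child_conn,), daemon=True)
    p.start()
    for partial in stream_chat(parent_conn):
        print(partial)
    p.join()
```

The thread lock wrapped around `stream_chat` in the real bridges exists because a single shared channel cannot keep two users' histories apart, which is exactly what the `self.info` warning strings say.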
@@ -1,67 +1,62 @@
-model_name = "Qwen"
-cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"
-
-
-from transformers import AutoModel, AutoTokenizer
 import time
-import threading
-import importlib
-from toolbox import update_ui, get_conf, ProxyNetworkActivate
-from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
-
-
-# ------------------------------------------------------------------------------------------------------------------------
-# 🔌💻 Local Model
-# ------------------------------------------------------------------------------------------------------------------------
-class GetONNXGLMHandle(LocalLLMHandle):
-
-    def load_model_info(self):
-        # 🏃♂️🏃♂️🏃♂️ runs in the child process
-        self.model_name = model_name
-        self.cmd_to_install = cmd_to_install
-
-    def load_model_and_tokenizer(self):
-        # 🏃♂️🏃♂️🏃♂️ runs in the child process
-        import os, glob
-        import os
-        import platform
-        from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
-
-        with ProxyNetworkActivate('Download_LLM'):
-            model_id = 'qwen/Qwen-7B-Chat'
-            self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True)
-            # use fp16
-            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
-            model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # different generation lengths, top_p and other hyper-parameters can be specified
-            self._model = model
-
-        return self._model, self._tokenizer
-
-    def llm_stream_generator(self, **kwargs):
-        # 🏃♂️🏃♂️🏃♂️ runs in the child process
-        def adaptor(kwargs):
-            query = kwargs['query']
-            max_length = kwargs['max_length']
-            top_p = kwargs['top_p']
-            temperature = kwargs['temperature']
-            history = kwargs['history']
-            return query, max_length, top_p, temperature, history
-
-        query, max_length, top_p, temperature, history = adaptor(kwargs)
-
-        for response in self._model.chat(self._tokenizer, query, history=history, stream=True):
-            yield response
-
-    def try_to_import_special_deps(self, **kwargs):
-        # import something that will raise error if the user does not install requirement_*.txt
-        # 🏃♂️🏃♂️🏃♂️ runs in the main process
-        import importlib
-        importlib.import_module('modelscope')
-
-
-# ------------------------------------------------------------------------------------------------------------------------
-# 🔌💻 GPT-Academic Interface
-# ------------------------------------------------------------------------------------------------------------------------
-predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name)
+import os
+from toolbox import update_ui, get_conf, update_ui_lastest_msg
+from toolbox import check_packages, report_exception
+
+model_name = 'Qwen'
+
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+    """
+    ⭐ Multithreading variant
+    See request_llms/bridge_all.py for the documentation of this function
+    """
+    watch_dog_patience = 5
+    response = ""
+
+    from .com_qwenapi import QwenRequestInstance
+    sri = QwenRequestInstance()
+    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
+        if len(observe_window) >= 1:
+            observe_window[0] = response
+        if len(observe_window) >= 2:
+            if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
+    return response
+
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
+    """
+    ⭐ Single-threaded variant
+    See request_llms/bridge_all.py for the documentation of this function
+    """
+    chatbot.append((inputs, ""))
+    yield from update_ui(chatbot=chatbot, history=history)
+
+    # try to import the dependency; if it is missing, suggest how to install it
+    try:
+        check_packages(["dashscope"])
+    except:
+        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade dashscope```。",
+                                         chatbot=chatbot, history=history, delay=0)
+        return
+
+    # check DASHSCOPE_API_KEY
+    if get_conf("DASHSCOPE_API_KEY") == "":
+        yield from update_ui_lastest_msg(f"请配置 DASHSCOPE_API_KEY。",
+                                         chatbot=chatbot, history=history, delay=0)
+        return
+
+    if additional_fn is not None:
+        from core_functional import handle_core_functionality
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
+
+    # start receiving the reply
+    from .com_qwenapi import QwenRequestInstance
+    sri = QwenRequestInstance()
+    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
+        chatbot[-1] = (inputs, response)
+        yield from update_ui(chatbot=chatbot, history=history)

+    # summarize the output
+    if response == f"[Local Message] 等待{model_name}响应中 ...":
+        response = f"[Local Message] {model_name}响应异常 ..."
+    history.extend([inputs, response])
+    yield from update_ui(chatbot=chatbot, history=history)
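The rewritten Qwen bridge keeps the `observe_window` convention that every `predict_no_ui_long_connection` in this repo follows: slot 0 carries the newest partial reply out to the caller, slot 1 holds a heartbeat timestamp the caller keeps refreshing, and if the heartbeat goes stale for longer than `watch_dog_patience` the bridge raises. A minimal sketch of that watchdog, with illustrative names:

```python
import time

WATCH_DOG_PATIENCE = 5  # seconds, as in the bridges above


def stream_with_watchdog(fragments, observe_window=[]):
    # observe_window[0]: newest partial reply, published for the caller to read.
    # observe_window[1]: heartbeat timestamp; the caller refreshes it while alive.
    response = ""
    for response in fragments:
        if len(observe_window) >= 1:
            observe_window[0] = response
        if len(observe_window) >= 2:
            if (time.time() - observe_window[1]) > WATCH_DOG_PATIENCE:
                raise RuntimeError("程序终止。")  # caller stopped feeding the watchdog
    return response


if __name__ == "__main__":
    window = ["", time.time()]
    print(stream_with_watchdog(iter(["a", "ab", "abc"]), window))  # -> "abc"
```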
@@ -0,0 +1,59 @@
+model_name = "Qwen_Local"
+cmd_to_install = "`pip install -r request_llms/requirements_qwen_local.txt`"
+
+from toolbox import ProxyNetworkActivate, get_conf
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
+
+
+
+# ------------------------------------------------------------------------------------------------------------------------
+# 🔌💻 Local Model
+# ------------------------------------------------------------------------------------------------------------------------
+class GetQwenLMHandle(LocalLLMHandle):
+
+    def load_model_info(self):
+        # 🏃♂️🏃♂️🏃♂️ runs in the child process
+        self.model_name = model_name
+        self.cmd_to_install = cmd_to_install
+
+    def load_model_and_tokenizer(self):
+        # 🏃♂️🏃♂️🏃♂️ runs in the child process
+        # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
+        from transformers import AutoModelForCausalLM, AutoTokenizer
+        from transformers.generation import GenerationConfig
+        with ProxyNetworkActivate('Download_LLM'):
+            model_id = get_conf('QWEN_LOCAL_MODEL_SELECTION')
+            self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
+            # use fp16
+            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
+            model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # different generation lengths, top_p and other hyper-parameters can be specified
+            self._model = model
+
+        return self._model, self._tokenizer
+
+    def llm_stream_generator(self, **kwargs):
+        # 🏃♂️🏃♂️🏃♂️ runs in the child process
+        def adaptor(kwargs):
+            query = kwargs['query']
+            max_length = kwargs['max_length']
+            top_p = kwargs['top_p']
+            temperature = kwargs['temperature']
+            history = kwargs['history']
+            return query, max_length, top_p, temperature, history
+
+        query, max_length, top_p, temperature, history = adaptor(kwargs)
+
+        for response in self._model.chat_stream(self._tokenizer, query, history=history):
+            yield response
+
+    def try_to_import_special_deps(self, **kwargs):
+        # import something that will raise error if the user does not install requirement_*.txt
+        # 🏃♂️🏃♂️🏃♂️ runs in the main process
+        import importlib
+        importlib.import_module('modelscope')
+
+
+# ------------------------------------------------------------------------------------------------------------------------
+# 🔌💻 GPT-Academic Interface
+# ------------------------------------------------------------------------------------------------------------------------
+predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)
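`ProxyNetworkActivate('Download_LLM')` above scopes the configured proxy to the model download only, then restores the previous network state. A rough, self-contained approximation of that context-manager pattern follows; it is a sketch of the idea, not the real `toolbox` implementation, and the environment-variable approach is an assumption:

```python
import contextlib
import os


@contextlib.contextmanager
def proxy_network_activate(https_proxy):
    # Temporarily route HTTPS traffic through a proxy, then restore the env.
    saved = os.environ.get("HTTPS_PROXY")
    os.environ["HTTPS_PROXY"] = https_proxy
    try:
        yield
    finally:
        if saved is None:
            os.environ.pop("HTTPS_PROXY", None)
        else:
            os.environ["HTTPS_PROXY"] = saved


if __name__ == "__main__":
    with proxy_network_activate("http://127.0.0.1:7890"):
        pass  # e.g. AutoTokenizer.from_pretrained(...) would run here
```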
request_llms/bridge_skylark2.py (normal file, 67 lines)
@@ -0,0 +1,67 @@
+import time
+from toolbox import update_ui, get_conf, update_ui_lastest_msg
+from toolbox import check_packages, report_exception
+
+model_name = '云雀大模型'
+
+def validate_key():
+    YUNQUE_SECRET_KEY = get_conf("YUNQUE_SECRET_KEY")
+    if YUNQUE_SECRET_KEY == '': return False
+    return True
+
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+    """
+    ⭐ Multithreading variant
+    See request_llms/bridge_all.py for the documentation of this function
+    """
+    watch_dog_patience = 5
+    response = ""
+
+    if validate_key() is False:
+        raise RuntimeError('请配置YUNQUE_SECRET_KEY')
+
+    from .com_skylark2api import YUNQUERequestInstance
+    sri = YUNQUERequestInstance()
+    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
+        if len(observe_window) >= 1:
+            observe_window[0] = response
+        if len(observe_window) >= 2:
+            if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
+    return response
+
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
+    """
+    ⭐ Single-threaded variant
+    See request_llms/bridge_all.py for the documentation of this function
+    """
+    chatbot.append((inputs, ""))
+    yield from update_ui(chatbot=chatbot, history=history)
+
+    # try to import the dependency; if it is missing, suggest how to install it
+    try:
+        check_packages(["zhipuai"])
+    except:
+        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
+                                         chatbot=chatbot, history=history, delay=0)
+        return
+
+    if validate_key() is False:
+        yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置HUOSHAN_API_KEY", chatbot=chatbot, history=history, delay=0)
+        return
+
+    if additional_fn is not None:
+        from core_functional import handle_core_functionality
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
+
+    # start receiving the reply
+    from .com_skylark2api import YUNQUERequestInstance
+    sri = YUNQUERequestInstance()
+    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
+        chatbot[-1] = (inputs, response)
+        yield from update_ui(chatbot=chatbot, history=history)
+
+    # summarize the output
+    if response == f"[Local Message] 等待{model_name}响应中 ...":
+        response = f"[Local Message] {model_name}响应异常 ..."
+    history.extend([inputs, response])
+    yield from update_ui(chatbot=chatbot, history=history)
@@ -26,7 +26,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",

     from .com_sparkapi import SparkRequestInstance
     sri = SparkRequestInstance()
-    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
+    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt, use_image_api=False):
         if len(observe_window) >= 1:
             observe_window[0] = response
         if len(observe_window) >= 2:
@@ -52,7 +52,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     # start receiving the reply
     from .com_sparkapi import SparkRequestInstance
     sri = SparkRequestInstance()
-    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
+    for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
@@ -7,14 +7,15 @@ import logging
 import time
 from toolbox import get_conf
 import asyncio

 load_message = "正在加载Claude组件,请稍候..."

 try:
     """
-    ========================================================================
+    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
     Part 1: Slack API Client
     https://github.com/yokonsan/claude-in-slack-api
-    ========================================================================
+    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
     """
+
     from slack_sdk.errors import SlackApiError
@@ -23,20 +24,23 @@ try:
     class SlackClient(AsyncWebClient):
         """The SlackClient class interacts with the Slack API to send and receive messages.

         Attributes:
         - CHANNEL_ID: str, the channel ID.

         Methods:
         - open_channel(): async method. Opens a channel via conversations_open and saves the returned channel ID in CHANNEL_ID.
         - chat(text: str): async method. Sends a text message to the opened channel.
         - get_slack_messages(): async method. Fetches the latest messages of the opened channel and returns them as a list; querying historical messages is not supported yet.
         - get_reply(): async method. Polls the opened channel; a message ending in "Typing…_" means Claude is still producing output, otherwise the loop ends.

         """

         CHANNEL_ID = None

         async def open_channel(self):
-            response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID'))
+            response = await self.conversations_open(
+                users=get_conf("SLACK_CLAUDE_BOT_ID")
+            )
             self.CHANNEL_ID = response["channel"]["id"]

         async def chat(self, text):
@@ -49,9 +53,14 @@ try:
         async def get_slack_messages(self):
             try:
                 # TODO: historical messages are not supported for now, because history leaks between users sharing the same channel
-                resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
+                resp = await self.conversations_history(
+                    channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1
+                )
-                msg = [msg for msg in resp["messages"]
-                       if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')]
+                msg = [
+                    msg
+                    for msg in resp["messages"]
+                    if msg.get("user") == get_conf("SLACK_CLAUDE_BOT_ID")
+                ]
                 return msg
             except (SlackApiError, KeyError) as e:
                 raise RuntimeError(f"获取Slack消息失败。")
@@ -69,13 +78,14 @@ try:
                 else:
                     yield True, msg["text"]
                     break

 except:
     pass

 """
-========================================================================
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 Part 2: the subprocess worker (the calling body)
-========================================================================
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 """

@@ -96,6 +106,7 @@ class ClaudeHandle(Process):
         try:
             self.success = False
             import slack_sdk
+
             self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。"
             self.success = True
         except:
@@ -110,15 +121,15 @@ class ClaudeHandle(Process):
         while True:
             # wait
             kwargs = self.child.recv()
-            question = kwargs['query']
+            question = kwargs["query"]
-            history = kwargs['history']
+            history = kwargs["history"]

             # start asking the question
             prompt = ""

             # the question
             prompt += question
-            print('question:', prompt)
+            print("question:", prompt)

             # submit
             await self.claude_model.chat(prompt)
@@ -131,11 +142,15 @@ class ClaudeHandle(Process):
             else:
                 # avoid losing the last message
                 slack_msgs = await self.claude_model.get_slack_messages()
-                last_msg = slack_msgs[-1]["text"] if slack_msgs and len(slack_msgs) > 0 else ""
+                last_msg = (
+                    slack_msgs[-1]["text"]
+                    if slack_msgs and len(slack_msgs) > 0
+                    else ""
+                )
                 if last_msg:
                     self.child.send(last_msg)
-                print('-------- receive final ---------')
+                print("-------- receive final ---------")
-                self.child.send('[Finish]')
+                self.child.send("[Finish]")

     def run(self):
         """
@@ -146,22 +161,24 @@ class ClaudeHandle(Process):
         self.local_history = []
         if (self.claude_model is None) or (not self.success):
             # proxy settings
-            proxies = get_conf('proxies')
+            proxies = get_conf("proxies")
             if proxies is None:
                 self.proxies_https = None
             else:
-                self.proxies_https = proxies['https']
+                self.proxies_https = proxies["https"]

             try:
-                SLACK_CLAUDE_USER_TOKEN = get_conf('SLACK_CLAUDE_USER_TOKEN')
+                SLACK_CLAUDE_USER_TOKEN = get_conf("SLACK_CLAUDE_USER_TOKEN")
-                self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https)
+                self.claude_model = SlackClient(
+                    token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https
+                )
-                print('Claude组件初始化成功。')
+                print("Claude组件初始化成功。")
             except:
                 self.success = False
-                tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
+                tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
-                self.child.send(f'[Local Message] 不能加载Claude组件。{tb_str}')
+                self.child.send(f"[Local Message] 不能加载Claude组件。{tb_str}")
-                self.child.send('[Fail]')
+                self.child.send("[Fail]")
-                self.child.send('[Finish]')
+                self.child.send("[Finish]")
                 raise RuntimeError(f"不能加载Claude组件。")

         self.success = True
@@ -169,42 +186,49 @@ class ClaudeHandle(Process):
             # enter the task-waiting state
             asyncio.run(self.async_run())
         except Exception:
-            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
+            tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
-            self.child.send(f'[Local Message] Claude失败 {tb_str}.')
+            self.child.send(f"[Local Message] Claude失败 {tb_str}.")
-            self.child.send('[Fail]')
+            self.child.send("[Fail]")
-            self.child.send('[Finish]')
+            self.child.send("[Finish]")

     def stream_chat(self, **kwargs):
         """
         This function runs in the main process
         """
         self.threadLock.acquire()
         self.parent.send(kwargs)  # send the request to the child process
         while True:
             res = self.parent.recv()  # wait for a Claude reply fragment
-            if res == '[Finish]':
+            if res == "[Finish]":
                 break  # done
-            elif res == '[Fail]':
+            elif res == "[Fail]":
                 self.success = False
                 break
             else:
                 yield res  # a Claude reply fragment
         self.threadLock.release()


 """
-========================================================================
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 Part 3: the unified calling interface of the main process
-========================================================================
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 """
 global claude_handle
 claude_handle = None

+
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
+def predict_no_ui_long_connection(
+    inputs,
+    llm_kwargs,
+    history=[],
+    sys_prompt="",
+    observe_window=None,
+    console_slience=False,
+):
     """
     Multithreading variant
     See request_llms/bridge_all.py for the documentation of this function
     """
     global claude_handle
     if (claude_handle is None) or (not claude_handle.success):
@@ -217,24 +241,40 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",

     # there is no sys_prompt interface, so the prompt is folded into history
     history_feedin = []
-    for i in range(len(history)//2):
+    for i in range(len(history) // 2):
-        history_feedin.append([history[2*i], history[2*i+1]])
+        history_feedin.append([history[2 * i], history[2 * i + 1]])

     watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
     response = ""
     observe_window[0] = "[Local Message] 等待Claude响应中 ..."
-    for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+    for response in claude_handle.stream_chat(
+        query=inputs,
+        history=history_feedin,
+        system_prompt=sys_prompt,
+        max_length=llm_kwargs["max_length"],
+        top_p=llm_kwargs["top_p"],
+        temperature=llm_kwargs["temperature"],
+    ):
         observe_window[0] = preprocess_newbing_out_simple(response)
         if len(observe_window) >= 2:
-            if (time.time()-observe_window[1]) > watch_dog_patience:
+            if (time.time() - observe_window[1]) > watch_dog_patience:
                 raise RuntimeError("程序终止。")
     return preprocess_newbing_out_simple(response)

+
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+def predict(
+    inputs,
+    llm_kwargs,
+    plugin_kwargs,
+    chatbot,
+    history=[],
+    system_prompt="",
+    stream=True,
+    additional_fn=None,
+):
     """
     Single-threaded variant
     See request_llms/bridge_all.py for the documentation of this function
     """
     chatbot.append((inputs, "[Local Message] 等待Claude响应中 ..."))

@@ -249,21 +289,30 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

     if additional_fn is not None:
         from core_functional import handle_core_functionality
-        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
+        inputs, history = handle_core_functionality(
+            additional_fn, inputs, history, chatbot
+        )

     history_feedin = []
-    for i in range(len(history)//2):
+    for i in range(len(history) // 2):
-        history_feedin.append([history[2*i], history[2*i+1]])
+        history_feedin.append([history[2 * i], history[2 * i + 1]])

     chatbot[-1] = (inputs, "[Local Message] 等待Claude响应中 ...")
     response = "[Local Message] 等待Claude响应中 ..."
-    yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
+    yield from update_ui(
+        chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
+    )
-    for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt):
+    for response in claude_handle.stream_chat(
+        query=inputs, history=history_feedin, system_prompt=system_prompt
+    ):
         chatbot[-1] = (inputs, preprocess_newbing_out(response))
-        yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
+        yield from update_ui(
+            chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
+        )
     if response == "[Local Message] 等待Claude响应中 ...":
         response = "[Local Message] Claude响应异常,请刷新界面重试 ..."
     history.extend([inputs, response])
-    logging.info(f'[raw_input] {inputs}')
+    logging.info(f"[raw_input] {inputs}")
-    logging.info(f'[response] {response}')
+    logging.info(f"[response] {response}")
     yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
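The `SlackClient` above is a thin subclass of slack_sdk's `AsyncWebClient`; `conversations_open` and `conversations_history` are standard slack_sdk calls, and sending (the `chat` method whose body the hunks skip) would typically go through `chat_postMessage`. A minimal standalone sketch of the same open-channel/send/poll flow, with placeholder token and bot-id values:

```python
import asyncio

from slack_sdk.web.async_client import AsyncWebClient


async def demo(user_token: str, bot_id: str):
    # Placeholder credentials; in the bridge these come from
    # SLACK_CLAUDE_USER_TOKEN and SLACK_CLAUDE_BOT_ID in the config.
    client = AsyncWebClient(token=user_token)
    resp = await client.conversations_open(users=bot_id)
    channel_id = resp["channel"]["id"]
    await client.chat_postMessage(channel=channel_id, text="Hello Claude")
    history = await client.conversations_history(channel=channel_id, limit=1)
    # Keep only messages written by the bot, as get_slack_messages does.
    return [m["text"] for m in history["messages"] if m.get("user") == bot_id]


if __name__ == "__main__":
    print(asyncio.run(demo("xoxp-...", "U0000000000")))
```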
@@ -1,6 +1,7 @@

 import time
 from toolbox import update_ui, get_conf, update_ui_lastest_msg
+from toolbox import check_packages, report_exception

 model_name = '智谱AI大模型'

@@ -37,6 +38,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     chatbot.append((inputs, ""))
     yield from update_ui(chatbot=chatbot, history=history)

+    # try to import the dependency; if it is missing, suggest how to install it
+    try:
+        check_packages(["zhipuai"])
+    except:
+        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install zhipuai==1.0.7```。",
+                                         chatbot=chatbot, history=history, delay=0)
+        return
+
     if validate_key() is False:
         yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0)
         return
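The `check_packages` guard added here (and in the Qwen and skylark bridges above) converts a missing dependency into a friendly UI hint instead of a raw traceback. Its essence, with an illustrative stand-in for `toolbox.check_packages`:

```python
import importlib


def check_packages(packages):
    # Illustrative stand-in: raise ImportError on the first missing package
    # so the caller can show an install hint instead of crashing.
    for name in packages:
        importlib.import_module(name)


if __name__ == "__main__":
    try:
        check_packages(["zhipuai"])
    except ImportError:
        print("Missing dependency; install it with: pip install zhipuai==1.0.7")
```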
request_llms/com_google.py (normal file, 229 lines)
@@ -0,0 +1,229 @@
+# encoding: utf-8
+# @Time   : 2023/12/25
+# @Author : Spike
+# @Descr  :
+import json
+import os
+import re
+import requests
+from typing import List, Dict, Tuple
+from toolbox import get_conf, encode_image, get_pictures_list
+
+proxies, TIMEOUT_SECONDS = get_conf("proxies", "TIMEOUT_SECONDS")
+
+"""
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+Part 5: some file-handling helpers
+files_filter_handler          filter files by type
+input_encode_handler          extract the files in the input and parse them
+file_manifest_filter_html     filter files by type and render them as html or md text
+link_mtime_to_md              append the local mtime to a file link, to avoid downloading a cached copy
+html_view_blank               hyperlink
+html_local_file               use a relative path for local files
+to_markdown_tabs              convert file lists into a markdown table
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+"""
+
+
+def files_filter_handler(file_list):
+    new_list = []
+    filter_ = [
+        "png",
+        "jpg",
+        "jpeg",
+        "bmp",
+        "svg",
+        "webp",
+        "ico",
+        "tif",
+        "tiff",
+        "raw",
+        "eps",
+    ]
+    for file in file_list:
+        file = str(file).replace("file=", "")
+        if os.path.exists(file):
+            if str(os.path.basename(file)).split(".")[-1] in filter_:
+                new_list.append(file)
+    return new_list
+
+
+def input_encode_handler(inputs, llm_kwargs):
+    if llm_kwargs["most_recent_uploaded"].get("path"):
+        image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"])
+    md_encode = []
+    for md_path in image_paths:
+        type_ = os.path.splitext(md_path)[1].replace(".", "")
+        type_ = "jpeg" if type_ == "jpg" else type_
+        md_encode.append({"data": encode_image(md_path), "type": type_})
+    return inputs, md_encode
+
+
+def file_manifest_filter_html(file_list, filter_: list = None, md_type=False):
+    new_list = []
+    if not filter_:
+        filter_ = [
+            "png",
+            "jpg",
+            "jpeg",
+            "bmp",
+            "svg",
+            "webp",
+            "ico",
+            "tif",
+            "tiff",
+            "raw",
+            "eps",
+        ]
+    for file in file_list:
+        if str(os.path.basename(file)).split(".")[-1] in filter_:
+            new_list.append(html_local_img(file, md=md_type))
+        elif os.path.exists(file):
+            new_list.append(link_mtime_to_md(file))
+        else:
+            new_list.append(file)
+    return new_list
+
+
+def link_mtime_to_md(file):
+    link_local = html_local_file(file)
+    link_name = os.path.basename(file)
+    a = f"[{link_name}]({link_local}?{os.path.getmtime(file)})"
+    return a
+
+
+def html_local_file(file):
+    base_path = os.path.dirname(__file__)  # project directory
+    if os.path.exists(str(file)):
+        file = f'file={file.replace(base_path, ".")}'
+    return file
+
+
+def html_local_img(__file, layout="left", max_width=None, max_height=None, md=True):
+    style = ""
+    if max_width is not None:
+        style += f"max-width: {max_width};"
+    if max_height is not None:
+        style += f"max-height: {max_height};"
+    __file = html_local_file(__file)
+    a = f'<div align="{layout}"><img src="{__file}" style="{style}"></div>'
+    if md:
+        a = f""
+    return a
+
+
+def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
+    """
+    Args:
+        head: the table header: []
+        tabs: the table values: [[column 1], [column 2], [column 3], [column 4]]
+        alignment: :--- left-aligned, :---: center-aligned, ---: right-aligned
+        column: True to keep data in columns, False to keep data in rows (default).
+    Returns:
+        A string representation of the markdown table.
+    """
+    if column:
+        transposed_tabs = list(map(list, zip(*tabs)))
+    else:
+        transposed_tabs = tabs
+    # Find the maximum length among the columns
+    max_len = max(len(column) for column in transposed_tabs)
+
+    tab_format = "| %s "
+    tabs_list = "".join([tab_format % i for i in head]) + "|\n"
+    tabs_list += "".join([tab_format % alignment for i in head]) + "|\n"
+
+    for i in range(max_len):
+        row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
+        row_data = file_manifest_filter_html(row_data, filter_=None)
+        tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"
+
+    return tabs_list
+
+
+class GoogleChatInit:
+    def __init__(self):
+        self.url_gemini = "https://generativelanguage.googleapis.com/v1beta/models/%m:streamGenerateContent?key=%k"
+
+    def generate_chat(self, inputs, llm_kwargs, history, system_prompt):
+        headers, payload = self.generate_message_payload(
+            inputs, llm_kwargs, history, system_prompt
+        )
+        response = requests.post(
+            url=self.url_gemini,
+            headers=headers,
+            data=json.dumps(payload),
+            stream=True,
+            proxies=proxies,
+            timeout=TIMEOUT_SECONDS,
+        )
+        return response.iter_lines()
+
+    def __conversation_user(self, user_input, llm_kwargs):
+        what_i_have_asked = {"role": "user", "parts": []}
+        if "vision" not in self.url_gemini:
+            input_ = user_input
+            encode_img = []
+        else:
+            input_, encode_img = input_encode_handler(user_input, llm_kwargs=llm_kwargs)
+        what_i_have_asked["parts"].append({"text": input_})
+        if encode_img:
+            for data in encode_img:
+                what_i_have_asked["parts"].append(
+                    {
+                        "inline_data": {
+                            "mime_type": f"image/{data['type']}",
+                            "data": data["data"],
+                        }
+                    }
+                )
+        return what_i_have_asked
+
+    def __conversation_history(self, history, llm_kwargs):
+        messages = []
+        conversation_cnt = len(history) // 2
+        if conversation_cnt:
+            for index in range(0, 2 * conversation_cnt, 2):
+                what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
+                what_gpt_answer = {
+                    "role": "model",
+                    "parts": [{"text": history[index + 1]}],
+                }
+                messages.append(what_i_have_asked)
+                messages.append(what_gpt_answer)
+        return messages
+
+    def generate_message_payload(
+        self, inputs, llm_kwargs, history, system_prompt
+    ) -> Tuple[Dict, Dict]:
+        messages = [
+            # {"role": "system", "parts": [{"text": system_prompt}]},  # gemini does not allow an even number of conversation turns, so this is unused for now; wait for future support...
+            # {"role": "user", "parts": [{"text": ""}]},
+            # {"role": "model", "parts": [{"text": ""}]}
+        ]
+        self.url_gemini = self.url_gemini.replace(
+            "%m", llm_kwargs["llm_model"]
+        ).replace("%k", get_conf("GEMINI_API_KEY"))
+        header = {"Content-Type": "application/json"}
+        if "vision" not in self.url_gemini:  # only handle history for non-vision models
+            messages.extend(
+                self.__conversation_history(history, llm_kwargs)
+            )  # handle history
+        messages.append(self.__conversation_user(inputs, llm_kwargs))  # handle the user turn
+        payload = {
+            "contents": messages,
+            "generationConfig": {
+                # "maxOutputTokens": 800,
+                "stopSequences": str(llm_kwargs.get("stop", "")).split(" "),
+                "temperature": llm_kwargs.get("temperature", 1),
+                "topP": llm_kwargs.get("top_p", 0.8),
+                "topK": 10,
+            },
+        }
+        return header, payload
+
+
+if __name__ == "__main__":
+    google = GoogleChatInit()
+    # print(gootle.generate_message_payload('你好呀', {}, ['123123', '3123123'], ''))
+    # gootle.input_encode_handle('123123[123123](./123123), ')
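`to_markdown_tabs` treats `tabs` as a list of columns by default (`column=False`) and as a list of rows when `column=True`. A small usage sketch, assuming the new module is importable as `request_llms.com_google`; plain cell values that are neither image paths nor existing files pass through `file_manifest_filter_html` unchanged:

```python
from request_llms.com_google import to_markdown_tabs  # assumed import path

head = ["file", "status"]
tabs = [["alpha", "beta"], ["ok", "fine"]]  # one inner list per column
print(to_markdown_tabs(head, tabs))
# | file | status |
# | :---: | :---: |
# | alpha | ok |
# | beta | fine |
```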
Some files were not shown because too many files changed in this diff.