Compare commits

...

172 commits

Author  SHA1  Message  Commit date
binary-husky
f35f6633e0 fix: welcome card flip bug 2024-08-02 11:20:41 +00:00
hongyi-zhao
573dc4d184 Add claude-3-5-sonnet-20240620 (#1907)
See https://docs.anthropic.com/en/docs/about-claude/models#model-names for model names.
2024-08-02 18:04:42 +08:00
binary-husky
da8b2d69ce update version 3.8 2024-08-02 10:02:04 +00:00
binary-husky
58e732c26f Merge branch 'frontier' 2024-08-02 09:50:40 +00:00
Menghuan1918
ca238daa8c Improve the web-search plugin: add search modes and search enhancement (#1874)
* Change default to Mixed option

* Add option optimizer

* Add search optimizer prompts

* Enhanced Processing

* Finish search_optimizer part

* prompts bug fix

* Bug fix
2024-07-23 00:55:48 +08:00
jiangfy-ihep
60b3491513 add gpt-4o-mini (#1904)
Co-authored-by: Fayu Jiang <jiangfayu@hotmail.com>
2024-07-23 00:55:34 +08:00
binary-husky
c1175bfb7d add flip card animation 2024-07-22 04:53:59 +00:00
binary-husky
b705afd5ff welcome menu bug fix 2024-07-22 04:35:52 +00:00
binary-husky
dfcd28abce add width_to_hide_welcome 2024-07-22 03:34:35 +00:00
binary-husky
1edaa9e234 hide when too narrow 2024-07-21 15:04:38 +00:00
binary-husky
f0cd617ec2 minor css improve 2024-07-20 10:29:47 +00:00
binary-husky
0b08bb2cea update svg 2024-07-20 07:15:08 +00:00
Keldos
d1f8607ac8 Update submit button dropdown style (#1900) 2024-07-20 14:50:56 +08:00
binary-husky
7eb68a2086 tune 2024-07-17 17:16:34 +00:00
binary-husky
ee9e99036a Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier 2024-07-17 17:14:49 +00:00
binary-husky
55e255220b update 2024-07-17 17:12:32 +00:00
lbykkkk
019cd26ae8 Merge branch 'frontier' of https://github.com/binary-husky/gpt_academic into frontier 2024-07-18 00:35:51 +08:00
lbykkkk
a5b21d5cc0 Modify content and unify logo colors 2024-07-18 00:35:40 +08:00
binary-husky
ce940ff70f roll welcome msg 2024-07-17 16:34:24 +00:00
binary-husky
fc6a83c29f update 2024-07-17 15:44:08 +00:00
binary-husky
1d3212e367 reverse welcome msg 2024-07-17 15:43:41 +00:00
lbykkkk
8a835352a3 Update welcome screen wording and logo 2024-07-17 19:49:07 +08:00
binary-husky
5456c9fa43 improve welcome UI 2024-07-16 16:23:07 +00:00
binary-husky
ea67054c30 update chuanhu theme 2024-07-16 16:07:46 +00:00
binary-husky
1084108df6 adding welcome page 2024-07-16 10:41:25 +00:00
binary-husky
40c9700a8d add welcome page 2024-07-15 15:47:24 +00:00
binary-husky
6da5623813 Reuse the submit button for multiple purposes 2024-07-15 04:23:43 +00:00
binary-husky
778c9cd9ec roll version 2024-07-15 03:29:56 +00:00
binary-husky
e290317146 proxy submit btn 2024-07-15 03:28:59 +00:00
binary-husky
85b92b7f07 move python comment agent to dropdown 2024-07-13 16:26:36 +00:00
binary-husky
ff899777ce improve source code comment plugin functionality 2024-07-13 16:20:17 +00:00
binary-husky
c1b8c773c3 stage compare source code comment 2024-07-13 15:28:53 +00:00
binary-husky
8747c48175 mt improvement 2024-07-12 08:26:40 +00:00
binary-husky
c0010c88bc implement auto comment 2024-07-12 07:36:40 +00:00
binary-husky
68838da8ad finish test 2024-07-12 04:19:07 +00:00
binary-husky
ca7de8fcdd version up 2024-07-10 02:00:36 +00:00
binary-husky
7ebc2d00e7 Merge branch 'master' into frontier 2024-07-09 03:19:35 +00:00
binary-husky
47fb81cfde Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2024-07-09 03:18:19 +00:00
binary-husky
83961c1002 optimize image generation fn 2024-07-09 03:18:14 +00:00
binary-husky
a8621333af js impl bug fix 2024-07-08 15:50:12 +00:00
binary-husky
f402ef8134 hide ask btn 2024-07-08 15:15:30 +00:00
binary-husky
65d0f486f1 change cache to lru_cache for lower python version 2024-07-07 16:02:05 +00:00
binary-husky
41f25a6a9b Merge branch 'bold_frontier' into frontier 2024-07-04 14:16:08 +00:00
binary-husky
4a6a032334 ignore 2024-07-04 14:14:49 +00:00
binary-husky
f945a7bd19 preserve theme selection 2024-07-04 14:11:51 +00:00
binary-husky
379dcb2fa7 minor gui bug fix 2024-07-04 13:31:21 +00:00
Menghuan1918
114192e025 Bug fix: can not chat with deepseek (#1879) 2024-07-04 20:28:53 +08:00
binary-husky
30c905917a unify plugin calling 2024-07-02 15:32:40 +00:00
binary-husky
0c6c357e9c revise qwen 2024-07-02 14:22:45 +00:00
binary-husky
9d11b17f25 Merge branch 'master' into frontier 2024-07-02 08:06:34 +00:00
binary-husky
1d9e9fa6a1 new page btn 2024-07-01 16:27:23 +00:00
Menghuan1918
6cd2d80dfd Bug fix: Some non-standard forms of error return are not caught (#1877) 2024-07-01 20:35:49 +08:00
binary-husky
18d3245fc9 ready next gradio version 2024-06-29 15:29:48 +00:00
hcy2206
194e665a3b Add support for the iFlytek Spark 4.0 large model (#1875) 2024-06-29 23:20:04 +08:00
binary-husky
7e201c5028 move test file to correct position 2024-06-28 08:23:40 +00:00
binary-husky
6babcb4a9c Merge branch 'master' into frontier 2024-06-27 06:52:03 +00:00
binary-husky
00e5a31b50 Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2024-06-27 06:50:06 +00:00
binary-husky
d8b9686eeb fix latex auto correct 2024-06-27 06:49:36 +00:00
binary-husky
b7b4e201cb fix latex auto correct 2024-06-27 06:49:10 +00:00
binary-husky
26e7677dc3 fix new api for taichu 2024-06-26 15:18:11 +00:00
Menghuan1918
25e06de1b6 Docker build bug fix (#1870) 2024-06-26 14:31:31 +08:00
binary-husky
5e64a50898 Merge branch 'master' into frontier 2024-06-25 11:43:40 +00:00
binary-husky
0ad571e6b5 prevent further stream when reset is clicked 2024-06-25 11:43:14 +00:00
binary-husky
60a42fb070 Merge branch 'master' into frontier 2024-06-25 11:14:32 +00:00
binary-husky
ddad5247fc upgrade searxng 2024-06-25 11:12:51 +00:00
binary-husky
c94d5054a2 move fn 2024-06-25 08:53:28 +00:00
binary-husky
ececfb9b6e test new dropdown js code 2024-06-25 08:34:50 +00:00
binary-husky
9f13c5cedf update default value of scroller_max_len 2024-06-25 05:34:55 +00:00
binary-husky
68b36042ce re-locate plugin 2024-06-25 05:32:20 +00:00
binary-husky
cac6c50d2f roll version 2024-06-19 12:56:23 +00:00
binary-husky
f884eb43cf Merge branch 'master' into frontier 2024-06-19 12:56:04 +00:00
binary-husky
d37383dd4e change arxiv cache dir path 2024-06-19 12:49:34 +00:00
binary-husky
dfae4e8081 optimize scrolling visual effect 2024-06-19 12:42:11 +00:00
binary-husky
15cc08505f resolve safe pickle err 2024-06-19 11:59:47 +00:00
iluem
c5a82f6ab7 Merge pull request from GHSA-3jrq-66fm-w7xr 2024-06-19 14:29:21 +08:00
binary-husky
768ed4514a minor formatting issue 2024-06-18 14:51:53 +00:00
binary-husky
9dfbff7fd0 Merge branch 'GHSA-3jrq-66fm-w7xr' into frontier 2024-06-18 10:19:10 +00:00
binary-husky
47cedde954 fix security issue GHSA-3jrq-66fm-w7xr 2024-06-18 10:18:33 +00:00
binary-husky
1e16485087 internet gpt minor bug fix 2024-06-16 15:16:24 +00:00
binary-husky
f3660d669f internet GPT upgrade 2024-06-16 14:10:38 +00:00
binary-husky
e6d1cb09cb Merge branch 'master' into frontier 2024-06-16 13:47:15 +00:00
binary-husky
12aebf9707 searxng based information gathering 2024-06-16 12:12:57 +00:00
binary-husky
0b5385e5e5 Merge branch 'master' of github.com:binary-husky/chatgpt_academic 2024-06-12 09:34:12 +00:00
binary-husky
2ff1a1fb0b update translation matrix 2024-06-12 09:34:05 +00:00
Yuki
cdadd38cf7 feat: block access to openapi references while running under fastapi (#1849)
- block fastapi openapi reference(swagger and redoc) routes
2024-06-10 22:26:46 +08:00
binary-husky
48e10fb10a Update README.md 2024-06-10 22:22:04 +08:00
binary-husky
ba484c55a0 Merge branch 'master' into frontier 2024-06-10 14:19:26 +00:00
Frank Lee
ca64a592f5 Update zhipu models (#1852) 2024-06-10 22:17:51 +08:00
Guoxin Sun
cb96ca132a Update common.js (#1854)
fix typo
2024-06-10 22:17:27 +08:00
binary-husky
737101b81d remove debug msg 2024-06-07 17:00:05 +00:00
binary-husky
612caa2f5f revise 2024-06-07 16:50:27 +00:00
binary-husky
85dbe4a4bf pdf processing improvement 2024-06-07 15:53:08 +00:00
binary-husky
2262a4d80a taichu model fix 2024-06-06 09:35:05 +00:00
binary-husky
b456ff02ab add note 2024-06-06 09:14:32 +00:00
binary-husky
24a21ae320 Zidong Taichu (紫东太初) large model 2024-06-06 09:05:06 +00:00
binary-husky
3d5790cc2c resolve fallback to non-multimodal problem 2024-06-06 08:00:30 +00:00
binary-husky
7de6015800 multimodal support for gpt-4o etc 2024-06-06 07:36:37 +00:00
binary-husky
46428b7c7a Merge branch 'master' into frontier 2024-06-01 16:22:32 +00:00
binary-husky
66a50c8019 live2d shutdown bug fix 2024-06-01 16:21:04 +00:00
Menghuan1918
814dc943ac Move the advanced parameters of the "Generate multiple charts" plugin into a secondary menu (#1839)
* Improve the prompts

* Update to new menu form

* Bug fix (wrong type of plugin_kwargs)
2024-06-01 13:34:33 +08:00
binary-husky
96cd1f0b25 secondary menu main input sync bug fix 2024-05-31 04:13:27 +00:00
binary-husky
4fc17f4add Merge branch 'master' into frontier 2024-05-30 15:00:44 +00:00
binary-husky
b3665d8fec remove check 2024-05-30 14:54:50 +00:00
binary-husky
80c4281888 TTS Default Enable 2024-05-30 14:27:18 +00:00
binary-husky
beda56abb0 update dockerfile 2024-05-30 12:44:17 +00:00
binary-husky
cb16941d01 update css 2024-05-30 12:35:47 +00:00
binary-husky
5cf9ac7849 Merge branch 'master' into frontier 2024-05-29 16:06:28 +00:00
binary-husky
51ddb88ceb correct hint err 2024-05-29 16:05:23 +00:00
binary-husky
69dfe5d514 compat to old void-terminal plugin 2024-05-29 15:50:00 +00:00
binary-husky
6819f87512 Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier 2024-05-23 16:35:20 +00:00
binary-husky
3d51b9d5bb compat baichuan 2024-05-23 16:35:15 +00:00
QiyuanChen
bff87ada92 Add support for the ERNIE-Speed and ERNIE-Lite models (#1821)
* feat: add ERNIE-Speed and ERNIE-Lite

Baidu's ERNIE-Speed and ERNIE-Lite models are now free to use, so their endpoints have been added. They can be accessed as ERNIE-Speed-128K, ERNIE-Speed-8K, and ERNIE-Lite-8K.

* chore: Modify supported models in config.py

Updated the list of Qianfan-supported models in config.py, adding the three free models.
2024-05-24 00:16:26 +08:00
binary-husky
a938412b6f save conversation wrap 2024-05-23 15:58:59 +00:00
binary-husky
a48acf6fec Flex Btn Bug Fix 2024-05-22 08:38:40 +00:00
binary-husky
c6b9ab5214 add document 2024-05-22 06:39:56 +00:00
binary-husky
aa3332de69 add document 2024-05-22 06:27:26 +00:00
binary-husky
d43175d46d fix type hint 2024-05-21 13:18:38 +00:00
binary-husky
8ca9232db2 Merge branch 'master' into frontier 2024-05-21 12:27:01 +00:00
binary-husky
1339aa0e1a doc2x latex conversion 2024-05-21 12:24:50 +00:00
binary-husky
f41419e767 update demo 2024-05-21 11:12:08 +00:00
binary-husky
d88c585305 improve latex plugin 2024-05-21 10:47:50 +00:00
binary-husky
0a88d18c7a secondary menu for pdf trans 2024-05-21 08:51:29 +00:00
binary-husky
0d0edc2216 Merge branch 'frontier' of github.com:binary-husky/chatgpt_academic into frontier 2024-05-19 21:54:16 +08:00
binary-husky
5e0875fcf4 from backend to front end 2024-05-19 21:54:06 +08:00
Shixian Sheng
c508b84db8 Update README.md (#1810) 2024-05-19 20:41:17 +08:00
Menghuan1918
f2b67602bb Add the FFmpeg dependency to the Docker build (#1807)
* Test: change dockerfile to install ffmpeg

* Add the ffmpeg to dockerfile (required by edge-tts)
2024-05-19 14:27:55 +08:00
binary-husky
29daba5d2f success? 2024-05-18 23:03:28 +08:00
binary-husky
9477824ac1 improve css 2024-05-18 21:54:15 +08:00
binary-husky
459c5b2d24 plugin refactor: phase 1 2024-05-18 20:23:50 +08:00
binary-husky
abf9b5aee5 Merge branch 'master' into frontier 2024-05-18 15:52:08 +08:00
binary-husky
2ce4482146 fix new ModelOverride fn bug 2024-05-18 15:47:25 +08:00
binary-husky
4282b83035 change TTS default to DISABLE 2024-05-18 15:43:35 +08:00
binary-husky
537be57c9b fix tts bugs 2024-05-17 21:07:28 +08:00
binary-husky
3aa92d6c80 change main ui hint 2024-05-17 11:34:13 +08:00
awwaawwa
b7eb9aba49 [Feature]: allow model mutex override in core_functional.py (#1708)
* allow_core_func_specify_model

* change arg name

* Model override supports hot reloading & raises an error when the override points to a nonexistent model

* allow model mutex override

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>
2024-05-17 11:15:23 +08:00
hongyi-zhao
881a596a30 Add model support (gpt-4o) to the project (#1760)
* Add the environment variable: OPEN_BROWSER

* Add configurable browser launching with custom arguments

- Update `config.py` to include options for specifying the browser and its arguments for opening URLs.
- Modify `main.py` to use the configured browser settings from `config.py` to launch the web page.
- Enhance `config_loader.py` to process path-like strings by expanding and normalizing paths, which supports the configuration improvements.

* Add support for the following models:

"gpt-4o", "gpt-4o-2024-05-13"

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>
2024-05-14 17:01:32 +08:00
binary-husky
1b3c331d01 dos2unix 2024-05-14 12:02:40 +08:00
binary-husky
70d5f2a7df arg name err patch 2024-05-13 23:40:35 +08:00
Menghuan1918
fd2f8b9090 Provide a new fast and simple way of accessing APIs (for example: Yi models, DeepSeek) (#1782)
* deal with the message part

* Finish no_ui_connect

* finish predict part

* Delete old version

* An example of adding a new api

* Bug fix: cannot change in "model_info"

* Bug fix

* Error message handling

* Clear the format

* An example of adding an OpenAI-style API: Deepseek

* For compatibility reasons

* Feature: set different API/Endpoint for different models

* Add support for YI new models

* Update the doc2x API key mechanism (#1766)

* Fix DOC2X API key refresh issue in PDF translation

* remove add

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>

* Rename some files and variables

* patch err

---------

Co-authored-by: alex_xiao <113411296+Alex4210987@users.noreply.github.com>
Co-authored-by: binary-husky <qingxu.fu@outlook.com>
2024-05-13 23:38:08 +08:00
binary-husky
225a2de011 Version 3.76 (#1752)
* version roll

* add upload progress bar
2024-05-13 22:54:38 +08:00
binary-husky
6aea6d8e2b Merge branch 'master' into frontier 2024-05-13 22:52:15 +08:00
alex_xiao
8d85616c27 Update the doc2x API key mechanism (#1766)
* Fix DOC2X API key refresh issue in PDF translation

* remove add

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>
2024-05-13 22:49:40 +08:00
binary-husky
e4533dd24d Merge branch 'master' into frontier 2024-05-04 17:00:09 +08:00
binary-husky
43ed8cb8a8 Fix fastapi version compat 2024-05-04 16:43:42 +08:00
binary-husky
3eff964424 Update README.md 2024-05-01 17:59:25 +08:00
OREEkE
ebde98b34b Update requirements.txt (#1753)
TTS_TYPE = "EDGE_TTS"需要的依赖
2024-05-01 14:55:04 +08:00
binary-husky
6f883031c0 Update config.py 2024-05-01 14:54:36 +08:00
binary-husky
fa15059f07 add upload progress bar 2024-05-01 01:11:35 +08:00
binary-husky
685c573619 version roll 2024-04-30 21:00:25 +08:00
binary-husky
5fcd02506c version 3.75 (#1702)
* Update version to 3.74

* Add support for Yi Model API (#1635)

* Update to support 01.AI (零一万物) Yi models

* Remove newbing

* Update config

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>

* Refactor function signatures in bridge files

* fix qwen api change

* rename and ref functions

* rename and move some cookie functions

* Add the haiku model and new endpoint configuration notes (#1626)

* haiku added

* Add haiku and endpoint configuration notes

* Haiku added

* Sync the notes with the latest endpoints

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>

* Authenticate file access under the private_upload directory (#1596)

* Authenticate file access under the private_upload directory

* minor fastapi adjustment

* Add logging functionality to enable saving conversation records

* waiting to fix username retrieve

* support 2nd web path

* allow accessing default user dir

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>

* remove yaml deps

* fix favicon

* fix abs path auth problem

* forget to write a return

* add `dashscope` to deps

* fix GHSA-v9q9-xj86-953p

* Patch for unauthorized cross-user access via overlapping usernames (#1681)

* add cohere model api access

* cohere + can_multi_thread

* fix block user access(fail)

* fix fastapi bug

* change cohere api endpoint

* explain version

* # fix com_zhipuglm.py illegal temperature problem (#1687)

* Update com_zhipuglm.py

# fix: users hit an illegal-argument error for the temperature parameter when using the zhipuai interface

* allow store lm model dropdown

* add a btn to reverse previous reset

* remove extra fns

* Add support for glm-4v model (#1700)

* Change the quantized loading method of chatglm3 (#1688)

Co-authored-by: zym9804 <ren990603@gmail.com>

* save chat stage 1

* consider null cookie situation

* Activate audio when the copy button is clicked

* miss some parts

* move all to js

* done first stage

* add edge tts

* bug fix

* bug fix

* remove console log

* bug fix

* bug fix

* bug fix

* audio switch

* update tts readme

* remove tempfile when done

* disable auto audio follow

* avoid play queue update after shut up

* feat: minimizing common.js

* improve tts functionality

* determine whether the cached model is in choices

* Add support for Ollama (#1740)

* print err when doc2x not successful

* add icon

* adjust url for doc2x key version

* prepare merge

---------

Co-authored-by: Menghuan1918 <menghuan2003@outlook.com>
Co-authored-by: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Co-authored-by: XIao <46100050+Kilig947@users.noreply.github.com>
Co-authored-by: Yuki <903728862@qq.com>
Co-authored-by: zyren123 <91042213+zyren123@users.noreply.github.com>
Co-authored-by: zym9804 <ren990603@gmail.com>
2024-04-30 20:37:41 +08:00
binary-husky
bd5280df1b minor pdf translation adjustment 2024-04-30 00:52:36 +08:00
binary-husky
744759704d allow personal docx api access 2024-04-29 23:53:41 +08:00
WFS
81df0aa210 fix the issue of when using google Gemini pro, don't have chat histor… (#1743)
* fix the issue where chat history is not recorded when using Google Gemini Pro

just add chat_log in bridge_google_gemini.py

* Update bridge_google_gemini.py

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
2024-04-25 22:26:32 +08:00
Menghuan1918
cadaa81030 Fix the bug that caused Nougat to be unusable (#1738)
* Bug fix for nougat requiring pdf

* Fixing bugs in a simpler and safer way
2024-04-24 12:13:44 +08:00
binary-husky
3b6cbbdcb0 Update README.md (#1736) 2024-04-24 11:41:56 +08:00
binary-husky
52e49c48b8 the latest zhipuai whl is broken 2024-04-23 18:20:36 +08:00
binary-husky
6ad15a6129 fix equation showing problem 2024-04-22 01:54:03 +08:00
binary-husky
09990d44d3 merge to resolve multiple pickle security issues (#1728)
* Comment out the debugging if branch

* support pdf url for latex translation

* Merge pull request from GHSA-mvrw-h7rc-22r8

* Comment out the debugging if branch

* Improve objload security

* Update README.md

* support pdf url for latex translation

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
Co-authored-by: binary-husky <qingxu.fu@outlook.com>

* fix import

---------

Co-authored-by: Longtaotao <longtaotao@bupt.edu.cn>
Co-authored-by: iluem <57590186+Qhaoduoyu@users.noreply.github.com>
2024-04-21 19:37:05 +08:00
binary-husky
eac5191815 Update README.md 2024-04-21 02:12:15 +08:00
owo
ae4407135d fix: add the missing a parameter in report_exception (#1720)
In the definition of report_exception, parameter a has no default value, so a corresponding value must be passed in.
2024-04-18 16:27:00 +08:00
owo
f0e15bd710 fix: 'schema_str' was used before being defined in the else branch (#1719)
Rearranged the conditional return statements in the method to ensure 'schema_str' is always defined before use.
2024-04-18 16:26:13 +08:00
jiangfy-ihep
5c5f442649 Fix: openai project API key pattern (#1721)
Co-authored-by: Fayu Jiang <jiangfayu@hotmail.com>
2024-04-18 16:24:29 +08:00
binary-husky
160552cc5f introduce doc2x 2024-04-15 01:57:31 +08:00
binary-husky
c131ec0b20 rename pdf plugin file name 2024-04-14 22:46:31 +08:00
iluem
2f3aeb7976 Merge pull request from GHSA-23cr-v6pm-j89p
* Update crazy_utils.py

Improve security

* add a white space

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
2024-04-14 21:51:03 +08:00
binary-husky
eff5b89b98 scan first, then extract 2024-04-14 21:36:57 +08:00
iluem
f77ab27bc9 Merge pull request from GHSA-rh7j-jfvq-857j
Prevent path traversal for improved security
2024-04-14 21:33:37 +08:00
awwaawwa
ba0a8b7072 integrate gpt-4-turbo-2024-04-09 (#1698)
* Integrate the gpt-4-turbo-2024-04-09 model

* add gpt-4-turbo and change to vision

* add gpt-4-turbo to avail llm models

* Temporarily wire gpt-4-turbo into the regular version
2024-04-11 22:02:40 +08:00
hmp
2406022c2a access vllm 2024-04-11 22:00:07 +08:00
OREEkE
02b6f26b05 remove logging in gradios.py (#1699)
If the initial theme is an HF community theme, using logging here stops the program from writing any logs at all (including conversation records); the theme download's log output conflicts with the logging initialization at program startup.
2024-04-11 14:15:12 +08:00
OREEkE
2a003e8d49 add loadLive2D() when ADD_WAIFU = False (#1693)
When ADD_WAIFU = False, the browser throws [Error] JQuery is not defined, because loadLive2D() is still called even though the jQuery library is not loaded. A check is now added: if ADD_WAIFU = False, disable loadLive2D() together with the jQuery library, unless ADD_WAIFU = True.
2024-04-10 00:10:53 +08:00
binary-husky
21891b0f6d update translate matrix 2024-04-08 12:43:24 +08:00
130 files changed, with 8,375 insertions and 1,561 deletions

.gitignore vendored
View file

@@ -131,6 +131,9 @@ dmypy.json
# Pyre type checker
.pyre/
# macOS files
.DS_Store
.vscode
.idea
@@ -153,3 +156,7 @@ media
flagged
request_llms/ChatGLM-6b-onnx-u8s8
.pre-commit-config.yaml
test.*
temp.*
objdump*
*.min.*.js

Dockerfile
View file

@@ -12,11 +12,16 @@ RUN echo '[global]' > /etc/pip.conf && \
echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
# 语音输出功能以下两行,第一行更换阿里源,第二行安装ffmpeg,都可以删除
RUN UBUNTU_VERSION=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release); echo "deb https://mirrors.aliyun.com/debian/ $UBUNTU_VERSION main non-free contrib" > /etc/apt/sources.list; apt-get update
RUN apt-get install ffmpeg -y
# 进入工作路径(必要)
WORKDIR /gpt
# 安装大部分依赖,利用Docker缓存加速以后的构建 (以下行,可以删除)
COPY requirements.txt ./
RUN pip3 install -r requirements.txt

README.md
View file

@@ -1,6 +1,7 @@
> [!IMPORTANT]
> 2024.3.11: 恭迎Claude3和Moonshot,全力支持Qwen、GLM、DeepseekCoder等中文大语言模型
> 2024.1.18: 更新3.70版本,支持Mermaid绘图库让大模型绘制脑图
> 2024.6.1: 版本3.80加入插件二级菜单功能详见wiki
> 2024.5.1: 加入Doc2x翻译PDF论文的功能,[查看详情](https://github.com/binary-husky/gpt_academic/wiki/Doc2x)
> 2024.3.11: 全力支持Qwen、GLM、DeepseekCoder等中文大语言模型 SoVits语音克隆模块,[查看详情](https://www.bilibili.com/video/BV1Rp421S7tF/)
> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。
<br>
@@ -66,7 +67,7 @@ Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanes
读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [插件] 一键解读latex/pdf论文全文并生成摘要
Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文
批量注释生成 | [插件] 一键批量生成函数注释
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?就是出自他的手笔
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README.English.md)了吗?就是出自他的手笔
[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程)
[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF
@@ -86,6 +87,10 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
<img src="https://user-images.githubusercontent.com/96192199/279702205-d81137c3-affd-4cd1-bb5e-b15610389762.gif" width="700" >
</div>
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/70ff1ec5-e589-4561-a29e-b831079b37fb.gif" width="700" >
</div>
- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放剪贴板
<div align="center">

check_proxy.py
View file

@@ -1,33 +1,44 @@
def check_proxy(proxies):
def check_proxy(proxies, return_ip=False):
import requests
proxies_https = proxies['https'] if proxies is not None else ''
ip = None
try:
response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
data = response.json()
if 'country_name' in data:
country = data['country_name']
result = f"代理配置 {proxies_https}, 代理所在地:{country}"
if 'ip' in data: ip = data['ip']
elif 'error' in data:
alternative = _check_with_backup_source(proxies)
alternative, ip = _check_with_backup_source(proxies)
if alternative is None:
result = f"代理配置 {proxies_https}, 代理所在地未知,IP查询频率受限"
else:
result = f"代理配置 {proxies_https}, 代理所在地:{alternative}"
else:
result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}"
print(result)
return result
if not return_ip:
print(result)
return result
else:
return ip
except:
result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
print(result)
return result
if not return_ip:
print(result)
return result
else:
return ip
def _check_with_backup_source(proxies):
import random, string, requests
random_string = ''.join(random.choices(string.ascii_letters + string.digits, k=32))
try: return requests.get(f"http://{random_string}.edns.ip-api.com/json", proxies=proxies, timeout=4).json()['dns']['geo']
except: return None
try:
res_json = requests.get(f"http://{random_string}.edns.ip-api.com/json", proxies=proxies, timeout=4).json()
return res_json['dns']['geo'], res_json['dns']['ip']
except:
return None, None
def backup_and_download(current_version, remote_version):
"""
@@ -71,7 +82,7 @@ def patch_and_restart(path):
import sys
import time
import glob
from colorful import print亮黄, print亮绿, print亮红
from shared_utils.colorful import print亮黄, print亮绿, print亮红
# if not using config_private, move origin config.py as config_private.py
if not os.path.exists('config_private.py'):
print亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,',
@@ -124,7 +135,7 @@ def auto_update(raise_error=False):
current_version = f.read()
current_version = json.loads(current_version)['version']
if (remote_version - current_version) >= 0.01-1e-5:
from colorful import print亮黄
from shared_utils.colorful import print亮黄
print亮黄(f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}{new_feature}')
print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?')
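
The hunk above extends check_proxy with a return_ip switch, so callers can fetch the detected egress IP programmatically instead of a printed report. A minimal usage sketch (assuming it runs from the repository root so check_proxy.py is importable; passing None means no proxy):

    from check_proxy import check_proxy

    # Original behaviour: print and return a human-readable report string.
    report = check_proxy(None)

    # New behaviour from this diff: return the egress IP, or None on failure.
    ip = check_proxy(None, return_ip=True)
    print(f"egress IP: {ip}" if ip else "IP lookup failed or rate-limited")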

config.py
View file

@@ -33,26 +33,29 @@ else:
# [step 3]>> 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
"gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
"gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
"gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo",
"gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
"gemini-pro", "chatglm3"
]
# --- --- --- ---
# P.S. 其他可用的模型还包括
# AVAIL_LLM_MODELS = [
# "glm-4-0520", "glm-4-air", "glm-4-airx", "glm-4-flash",
# "qianfan", "deepseekcoder",
# "spark", "sparkv2", "sparkv3", "sparkv3.5",
# "spark", "sparkv2", "sparkv3", "sparkv3.5", "sparkv4",
# "qwen-turbo", "qwen-plus", "qwen-max", "qwen-local",
# "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k",
# "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125"
# "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125", "gpt-4o-2024-05-13"
# "claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229", "claude-2.1", "claude-instant-1.2",
# "moss", "llama2", "chatglm_onnx", "internlm", "jittorllms_pangualpha", "jittorllms_llama",
# "yi-34b-chat-0205", "yi-34b-chat-200k"
# "deepseek-chat" ,"deepseek-coder",
# "yi-34b-chat-0205","yi-34b-chat-200k","yi-large","yi-medium","yi-spark","yi-large-turbo","yi-large-preview",
# ]
# --- --- --- ---
# 此外,为了更灵活地接入one-api多模型管理界面,您还可以在接入one-api时,
# 使用"one-api-*"前缀直接使用非标准方式接入的模型,例如
# AVAIL_LLM_MODELS = ["one-api-claude-3-sonnet-20240229(max_token=100000)"]
# 此外,您还可以在接入one-api/vllm/ollama时,
# 使用"one-api-*","vllm-*","ollama-*"前缀直接使用非标准方式接入的模型,例如
# AVAIL_LLM_MODELS = ["one-api-claude-3-sonnet-20240229(max_token=100000)", "ollama-phi3(max_token=4096)"]
# --- --- --- ---
@@ -60,7 +63,7 @@ AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-p
# 重新URL重新定向,实现更换API_URL的作用高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人
# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"}
# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions", "http://localhost:11434/api/chat": "在这里填写您ollama的URL"}
API_URL_REDIRECT = {}
@@ -103,6 +106,10 @@ TIMEOUT_SECONDS = 30
WEB_PORT = -1
# 是否自动打开浏览器页面
AUTO_OPEN_BROWSER = True
# 如果OpenAI不响应网络卡顿、代理失败、KEY失效,重试的次数限制
MAX_RETRY = 2
@@ -128,7 +135,7 @@ DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY
# 百度千帆LLM_MODEL="qianfan"
BAIDU_CLOUD_API_KEY = ''
BAIDU_CLOUD_SECRET_KEY = ''
BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot-4"(文心大模型4.0), "ERNIE-Bot"(文心一言), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat"
BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot-4"(文心大模型4.0), "ERNIE-Bot"(文心一言), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat", "ERNIE-Speed-128K", "ERNIE-Speed-8K", "ERNIE-Lite-8K"
# 如果使用ChatGLM2微调模型,请把 LLM_MODEL="chatglmft",并在此处指定模型路径
@@ -195,6 +202,12 @@ ALIYUN_ACCESSKEY="" # (无需填写)
ALIYUN_SECRET="" # (无需填写)
# GPT-SOVITS 文本转语音服务的运行地址(将语言模型的生成文本朗读出来)
TTS_TYPE = "EDGE_TTS" # EDGE_TTS / LOCAL_SOVITS_API / DISABLE
GPT_SOVITS_URL = ""
EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"
# 接入讯飞星火大模型 https://console.xfyun.cn/services/iat
XFYUN_APPID = "00000000"
XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
@@ -218,11 +231,23 @@ MOONSHOT_API_KEY = ""
YIMODEL_API_KEY = ""
# 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions"
DEEPSEEK_API_KEY = ""
# 紫东太初大模型 https://ai-maas.wair.ac.cn
TAICHU_API_KEY = ""
# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号
MATHPIX_APPID = ""
MATHPIX_APPKEY = ""
# DOC2X的PDF解析服务,注册账号并获取API KEY: https://doc2x.noedgeai.com/login
DOC2X_API_KEY = ""
# 自定义API KEY格式
CUSTOM_API_KEY_PATTERN = ""
@@ -244,6 +269,10 @@ GROBID_URLS = [
]
# Searxng互联网检索服务
SEARXNG_URL = "https://cloud-1.agent-matrix.com/"
# 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭
ALLOW_RESET_CONFIG = False
@@ -252,23 +281,23 @@ ALLOW_RESET_CONFIG = False
AUTOGEN_USE_DOCKER = False
# 临时的上传文件夹位置,请修改
# 临时的上传文件夹位置,请尽量不要修改
PATH_PRIVATE_UPLOAD = "private_upload"
# 日志文件夹的位置,请修改
# 日志文件夹的位置,请尽量不要修改
PATH_LOGGING = "gpt_log"
# 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改
# 存储翻译好的arxiv论文的路径,请尽量不要修改
ARXIV_CACHE_DIR = "gpt_log/arxiv_cache"
# 除了连接OpenAI之外,还有哪些场合允许使用代理,请尽量不要修改
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
"Warmup_Modules", "Nougat_Download", "AutoGen"]
# *实验性功能*: 自动检测并屏蔽失效的KEY,请勿使用
BLOCK_INVALID_APIKEY = False
# 启用插件热加载
PLUGIN_HOT_RELOAD = False
@@ -365,6 +394,9 @@ NUM_CUSTOM_BASIC_BTN = 4
插件在线服务配置依赖关系示意图
├── 互联网检索
│ └── SEARXNG_URL
├── 语音功能
│ ├── ENABLE_AUDIO
│ ├── ALIYUN_TOKEN
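
The revised comments above document the "one-api-*", "vllm-*" and "ollama-*" prefixes for attaching non-standard models. A minimal config_private.py sketch assembled only from values shown in the diff (config_private.py overrides config.py; the redirect target is a placeholder for your own ollama URL):

    # config_private.py -- a sketch, not a tested configuration.
    LLM_MODEL = "gpt-3.5-turbo-16k"  # the default model must appear in AVAIL_LLM_MODELS
    AVAIL_LLM_MODELS = [
        "gpt-3.5-turbo-16k", "gpt-4o", "gpt-4o-mini",
        "one-api-claude-3-sonnet-20240229(max_token=100000)",  # via a one-api gateway
        "ollama-phi3(max_token=4096)",                         # via a local ollama server
    ]
    # Point the ollama chat endpoint at wherever your server actually listens.
    API_URL_REDIRECT = {"http://localhost:11434/api/chat": "http://localhost:11434/api/chat"}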

core_functional.py
View file

@@ -33,6 +33,8 @@ def get_core_functions():
"AutoClearHistory": False,
# [6] 文本预处理 (可选参数,默认 None,举例写个函数移除所有的换行符
"PreProcess": None,
# [7] 模型选择 (可选参数。如不设置,则使用当前全局模型;如设置,则用指定模型覆盖全局模型。)
# "ModelOverride": "gpt-3.5-turbo", # 主要用途:强制点击此基础功能按钮时,使用指定的模型。
},
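
The new ModelOverride field pins a core-function button to a specific model instead of the globally selected one; per the commit notes it hot-reloads and errors if the named model does not exist. A sketch of a complete entry (the button name and prompt text are hypothetical; only the keys come from the diff):

    # Illustrative core-function entry using the new ModelOverride key.
    core_functions = {
        "英语学术润色": {
            "Prefix": "Polish the following academic paragraph:\n\n",  # hypothetical prompt
            "Suffix": "",
            "AutoClearHistory": False,
            "PreProcess": None,                # optional text pre-processing hook
            "ModelOverride": "gpt-3.5-turbo",  # clicking this button always uses the named model
        },
    }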

crazy_functional.py
View file

@@ -5,42 +5,56 @@ from toolbox import trimmed_format_exc
def get_crazy_functions():
from crazy_functions.读文章写摘要 import 读文章写摘要
from crazy_functions.生成函数注释 import 批量生成函数注释
from crazy_functions.解析项目源代码 import 解析项目本身
from crazy_functions.解析项目源代码 import 解析一个Python项目
from crazy_functions.解析项目源代码 import 解析一个Matlab项目
from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
from crazy_functions.解析项目源代码 import 解析一个C项目
from crazy_functions.解析项目源代码 import 解析一个Golang项目
from crazy_functions.解析项目源代码 import 解析一个Rust项目
from crazy_functions.解析项目源代码 import 解析一个Java项目
from crazy_functions.解析项目源代码 import 解析一个前端项目
from crazy_functions.SourceCode_Analyse import 解析项目本身
from crazy_functions.SourceCode_Analyse import 解析一个Python项目
from crazy_functions.SourceCode_Analyse import 解析一个Matlab项目
from crazy_functions.SourceCode_Analyse import 解析一个C项目的头文件
from crazy_functions.SourceCode_Analyse import 解析一个C项目
from crazy_functions.SourceCode_Analyse import 解析一个Golang项目
from crazy_functions.SourceCode_Analyse import 解析一个Rust项目
from crazy_functions.SourceCode_Analyse import 解析一个Java项目
from crazy_functions.SourceCode_Analyse import 解析一个前端项目
from crazy_functions.高级功能函数模板 import 高阶功能模板函数
from crazy_functions.高级功能函数模板 import Demo_Wrap
from crazy_functions.Latex全文润色 import Latex英文润色
from crazy_functions.询问多个大语言模型 import 同时问询
from crazy_functions.解析项目源代码 import 解析一个Lua项目
from crazy_functions.解析项目源代码 import 解析一个CSharp项目
from crazy_functions.SourceCode_Analyse import 解析一个Lua项目
from crazy_functions.SourceCode_Analyse import 解析一个CSharp项目
from crazy_functions.总结word文档 import 总结word文档
from crazy_functions.解析JupyterNotebook import 解析ipynb文件
from crazy_functions.对话历史存档 import 对话历史存档
from crazy_functions.对话历史存档 import 载入对话历史存档
from crazy_functions.对话历史存档 import 删除所有本地对话历史记录
from crazy_functions.Conversation_To_File import 载入对话历史存档
from crazy_functions.Conversation_To_File import 对话历史存档
from crazy_functions.Conversation_To_File import Conversation_To_File_Wrap
from crazy_functions.Conversation_To_File import 删除所有本地对话历史记录
from crazy_functions.辅助功能 import 清除缓存
from crazy_functions.批量Markdown翻译 import Markdown英译中
from crazy_functions.Markdown_Translate import Markdown英译中
from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
from crazy_functions.PDF_Translate import 批量翻译PDF文档
from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
from crazy_functions.Latex全文润色 import Latex中文润色
from crazy_functions.Latex全文润色 import Latex英文纠错
from crazy_functions.批量Markdown翻译 import Markdown中译英
from crazy_functions.Markdown_Translate import Markdown中译英
from crazy_functions.虚空终端 import 虚空终端
from crazy_functions.生成多种Mermaid图表 import 生成多种Mermaid图表
from crazy_functions.生成多种Mermaid图表 import Mermaid_Gen
from crazy_functions.PDF_Translate_Wrap import PDF_Tran
from crazy_functions.Latex_Function import Latex英文纠错加PDF对比
from crazy_functions.Latex_Function import Latex翻译中文并重新编译PDF
from crazy_functions.Latex_Function import PDF翻译中文并重新编译PDF
from crazy_functions.Latex_Function_Wrap import Arxiv_Localize
from crazy_functions.Latex_Function_Wrap import PDF_Localize
from crazy_functions.Internet_GPT import 连接网络回答问题
from crazy_functions.Internet_GPT_Wrap import NetworkGPT_Wrap
from crazy_functions.Image_Generate import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
from crazy_functions.Image_Generate_Wrap import ImageGen_Wrap
from crazy_functions.SourceCode_Comment import 注释Python项目
function_plugins = {
"虚空终端": {
"Group": "对话|编程|学术|智能体",
"Color": "stop",
"AsButton": True,
"Info": "使用自然语言实现您的想法",
"Function": HotReload(虚空终端),
},
"解析整个Python项目": {
@@ -50,6 +64,13 @@ def get_crazy_functions():
"Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径",
"Function": HotReload(解析一个Python项目),
},
"注释Python项目": {
"Group": "编程",
"Color": "stop",
"AsButton": False,
"Info": "上传一系列python源文件(或者压缩包), 为这些代码添加docstring | 输入参数为路径",
"Function": HotReload(注释Python项目),
},
"载入对话历史存档(先上传存档或输入路径)": {
"Group": "对话",
"Color": "stop",
@@ -75,14 +96,21 @@ def get_crazy_functions():
"Color": "stop",
"AsButton": False,
"Info" : "基于当前对话或文件生成多种Mermaid图表,图表类型由模型判断",
"Function": HotReload(生成多种Mermaid图表),
"AdvancedArgs": True,
"ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图",
"Function": None,
"Class": Mermaid_Gen
},
"Arxiv论文翻译": {
"Group": "学术",
"Color": "stop",
"AsButton": True,
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
"Function": HotReload(Latex翻译中文并重新编译PDF), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用
"Class": Arxiv_Localize, # 新一代插件需要注册Class
},
"批量总结Word文档": {
"Group": "学术",
"Color": "stop",
"AsButton": True,
"AsButton": False,
"Info": "批量总结word文档 | 输入参数为路径",
"Function": HotReload(总结word文档),
},
@@ -188,28 +216,42 @@ def get_crazy_functions():
},
"保存当前的对话": {
"Group": "对话",
"Color": "stop",
"AsButton": True,
"Info": "保存当前的对话 | 不需要输入参数",
"Function": HotReload(对话历史存档),
"Function": HotReload(对话历史存档), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用
"Class": Conversation_To_File_Wrap # 新一代插件需要注册Class
},
"[多线程Demo]解析此项目本身(源码自译解)": {
"Group": "对话|编程",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
"Function": HotReload(解析项目本身),
},
"查互联网后回答": {
"Group": "对话",
"Color": "stop",
"AsButton": True, # 加入下拉菜单中
# "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
"Function": HotReload(连接网络回答问题),
"Class": NetworkGPT_Wrap # 新一代插件需要注册Class
},
"历史上的今天": {
"Group": "对话",
"AsButton": True,
"Color": "stop",
"AsButton": False,
"Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
"Function": HotReload(高阶功能模板函数),
"Function": None,
"Class": Demo_Wrap, # 新一代插件需要注册Class
},
"精准翻译PDF论文": {
"Group": "学术",
"Color": "stop",
"AsButton": True,
"Info": "精准翻译PDF论文为中文 | 输入参数为路径",
"Function": HotReload(批量翻译PDF文档),
"Function": HotReload(批量翻译PDF文档), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用
"Class": PDF_Tran, # 新一代插件需要注册Class
},
"询问多个GPT模型": {
"Group": "对话",
@@ -284,8 +326,85 @@ def get_crazy_functions():
"Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包",
"Function": HotReload(Markdown中译英),
},
"Latex英文纠错+高亮修正位置 [需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
"Function": HotReload(Latex英文纠错加PDF对比),
},
"📚Arxiv论文精细翻译输入arxivID[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
"Function": HotReload(Latex翻译中文并重新编译PDF), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用
"Class": Arxiv_Localize, # 新一代插件需要注册Class
},
"📚本地Latex论文精细翻译上传Latex项目[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "本地Latex论文精细翻译 | 输入参数是路径",
"Function": HotReload(Latex翻译中文并重新编译PDF),
},
"PDF翻译中文并重新编译PDF上传PDF[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "PDF翻译中文,并重新编译PDF | 输入参数为路径",
"Function": HotReload(PDF翻译中文并重新编译PDF), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用
"Class": PDF_Localize # 新一代插件需要注册Class
}
}
function_plugins.update(
{
"🎨图片生成DALLE2/DALLE3, 使用前切换到GPT系列模型": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"Info": "使用 DALLE2/DALLE3 生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE2), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用
"Class": ImageGen_Wrap # 新一代插件需要注册Class
},
}
)
function_plugins.update(
{
"🎨图片修改_DALLE2 使用前请切换模型到GPT系列": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": False, # 调用时,唤起高级参数输入区默认False
# "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片修改_DALLE2),
},
}
)
# -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-
try:
from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
@@ -305,39 +424,39 @@ def get_crazy_functions():
print(trimmed_format_exc())
print("Load function plugin failed")
try:
from crazy_functions.联网的ChatGPT import 连接网络回答问题
# try:
# from crazy_functions.联网的ChatGPT import 连接网络回答问题
function_plugins.update(
{
"连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
# "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
"Function": HotReload(连接网络回答问题),
}
}
)
from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
# function_plugins.update(
# {
# "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
# "Group": "对话",
# "Color": "stop",
# "AsButton": False, # 加入下拉菜单中
# # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
# "Function": HotReload(连接网络回答问题),
# }
# }
# )
# from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
function_plugins.update(
{
"连接网络回答问题中文Bing版,输入问题后点击该插件": {
"Group": "对话",
"Color": "stop",
"AsButton": False, # 加入下拉菜单中
"Info": "连接网络回答问题需要访问中文Bing| 输入参数是一个问题",
"Function": HotReload(连接bing搜索回答问题),
}
}
)
except:
print(trimmed_format_exc())
print("Load function plugin failed")
# function_plugins.update(
# {
# "连接网络回答问题中文Bing版,输入问题后点击该插件": {
# "Group": "对话",
# "Color": "stop",
# "AsButton": False, # 加入下拉菜单中
# "Info": "连接网络回答问题需要访问中文Bing| 输入参数是一个问题",
# "Function": HotReload(连接bing搜索回答问题),
# }
# }
# )
# except:
# print(trimmed_format_exc())
# print("Load function plugin failed")
try:
from crazy_functions.解析项目源代码 import 解析任意code项目
from crazy_functions.SourceCode_Analyse import 解析任意code项目
function_plugins.update(
{
@@ -374,50 +493,7 @@ def get_crazy_functions():
print(trimmed_format_exc())
print("Load function plugin failed")
try:
from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
function_plugins.update(
{
"图片生成_DALLE2 先切换模型到gpt-*": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时,唤起高级参数输入区默认False
"ArgsReminder": "在这里输入分辨率, 如1024x1024默认,支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示
"Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE2),
},
}
)
function_plugins.update(
{
"图片生成_DALLE3 先切换模型到gpt-*": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True, # 调用时,唤起高级参数输入区默认False
"ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」", # 高级参数输入区的显示提示
"Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片生成_DALLE3),
},
}
)
function_plugins.update(
{
"图片修改_DALLE2 先切换模型到gpt-*": {
"Group": "对话",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": False, # 调用时,唤起高级参数输入区默认False
# "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
"Function": HotReload(图片修改_DALLE2),
},
}
)
except:
print(trimmed_format_exc())
print("Load function plugin failed")
try:
from crazy_functions.总结音视频 import 总结音视频
@@ -458,7 +534,7 @@ def get_crazy_functions():
print("Load function plugin failed")
try:
from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
from crazy_functions.Markdown_Translate import Markdown翻译指定语言
function_plugins.update(
{
@@ -531,59 +607,6 @@ def get_crazy_functions():
print(trimmed_format_exc())
print("Load function plugin failed")
try:
from crazy_functions.Latex输出PDF import Latex英文纠错加PDF对比
from crazy_functions.Latex输出PDF import Latex翻译中文并重新编译PDF
from crazy_functions.Latex输出PDF import PDF翻译中文并重新编译PDF
function_plugins.update(
{
"Latex英文纠错+高亮修正位置 [需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
"Function": HotReload(Latex英文纠错加PDF对比),
},
"Arxiv论文精细翻译输入arxivID[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
"Function": HotReload(Latex翻译中文并重新编译PDF),
},
"本地Latex论文精细翻译上传Latex项目[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "本地Latex论文精细翻译 | 输入参数是路径",
"Function": HotReload(Latex翻译中文并重新编译PDF),
},
"PDF翻译中文并重新编译PDF上传PDF[需Latex]": {
"Group": "学术",
"Color": "stop",
"AsButton": False,
"AdvancedArgs": True,
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
"Info": "PDF翻译中文,并重新编译PDF | 输入参数为路径",
"Function": HotReload(PDF翻译中文并重新编译PDF)
}
}
)
except:
print(trimmed_format_exc())
print("Load function plugin failed")
try:
from toolbox import get_conf
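
The registration table above mixes two plugin interfaces: the legacy Function generator (still used by the 虚空终端 once a Class is registered) and the new-generation Class that adds a secondary options menu. A minimal sketch of the pattern with a hypothetical plugin; it assumes the code lives inside the repository so toolbox and the plugin template are importable:

    from toolbox import HotReload, update_ui
    from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty

    def 示例插件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
        # Legacy generator-style plugin body.
        chatbot.append((txt, f"[Local Message] echo: {txt}"))
        yield from update_ui(chatbot=chatbot, history=history)

    class 示例插件_Wrap(GptAcademicPluginTemplate):
        def define_arg_selection_menu(self):
            # One text box in the secondary menu, auto-synced from the main input box.
            return {
                "main_input": ArgProperty(title="输入", description="插件主输入",
                                          default_value="", type="string").model_dump_json(),
            }

        def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
            # Note: the project defines execute without an explicit self (see the diffs above).
            yield from 示例插件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)

    function_plugins = {
        "示例插件": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": False,                # dropdown item rather than a button
            "Function": HotReload(示例插件),   # legacy interface
            "Class": 示例插件_Wrap,            # new-generation registration
        },
    }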

crazy_functions/Conversation_To_File.py
View file

@@ -1,4 +1,5 @@
from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder, get_user
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
import re
f_prefix = 'GPT-Academic对话存档'
@@ -9,27 +10,61 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
"""
import os
import time
from themes.theme import advanced_css
if file_name is None:
file_name = f_prefix + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
fp = os.path.join(get_log_folder(get_user(chatbot), plugin_name='chat_history'), file_name)
with open(fp, 'w', encoding='utf8') as f:
from themes.theme import advanced_css
f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
from textwrap import dedent
form = dedent("""
<!DOCTYPE html><head><meta charset="utf-8"><title>对话存档</title><style>{CSS}</style></head>
<body>
<div class="test_temp1" style="width:10%; height: 500px; float:left;"></div>
<div class="test_temp2" style="width:80%;padding: 40px;float:left;padding-left: 20px;padding-right: 20px;box-shadow: rgba(0, 0, 0, 0.2) 0px 0px 8px 8px;border-radius: 10px;">
<div class="chat-body" style="display: flex;justify-content: center;flex-direction: column;align-items: center;flex-wrap: nowrap;">
{CHAT_PREVIEW}
<div></div>
<div></div>
<div style="text-align: center;width:80%;padding: 0px;float:left;padding-left:20px;padding-right:20px;box-shadow: rgba(0, 0, 0, 0.05) 0px 0px 1px 2px;border-radius: 1px;">对话原始数据</div>
{HISTORY_PREVIEW}
</div>
</div>
<div class="test_temp3" style="width:10%; height: 500px; float:left;"></div>
</body>
""")
qa_from = dedent("""
<div class="QaBox" style="width:80%;padding: 20px;margin-bottom: 20px;box-shadow: rgb(0 255 159 / 50%) 0px 0px 1px 2px;border-radius: 4px;">
<div class="Question" style="border-radius: 2px;">{QUESTION}</div>
<hr color="blue" style="border-top: dotted 2px #ccc;">
<div class="Answer" style="border-radius: 2px;">{ANSWER}</div>
</div>
""")
history_from = dedent("""
<div class="historyBox" style="width:80%;padding: 0px;float:left;padding-left:20px;padding-right:20px;box-shadow: rgba(0, 0, 0, 0.05) 0px 0px 1px 2px;border-radius: 1px;">
<div class="entry" style="border-radius: 2px;">{ENTRY}</div>
</div>
""")
CHAT_PREVIEW_BUF = ""
for i, contents in enumerate(chatbot):
for j, content in enumerate(contents):
try: # 这个bug没找到触发条件,暂时先这样顶一下
if type(content) != str: content = str(content)
except:
continue
f.write(content)
if j == 0:
f.write('<hr style="border-top: dotted 3px #ccc;">')
f.write('<hr color="red"> \n\n')
f.write('<hr color="blue"> \n\n raw chat context:\n')
f.write('<code>')
question, answer = contents[0], contents[1]
if question is None: question = ""
try: question = str(question)
except: question = ""
if answer is None: answer = ""
try: answer = str(answer)
except: answer = ""
CHAT_PREVIEW_BUF += qa_from.format(QUESTION=question, ANSWER=answer)
HISTORY_PREVIEW_BUF = ""
for h in history:
f.write("\n>>>" + h)
f.write('</code>')
HISTORY_PREVIEW_BUF += history_from.format(ENTRY=h)
html_content = form.format(CHAT_PREVIEW=CHAT_PREVIEW_BUF, HISTORY_PREVIEW=HISTORY_PREVIEW_BUF, CSS=advanced_css)
f.write(html_content)
promote_file_to_downloadzone(fp, rename_file=file_name, chatbot=chatbot)
return '对话历史写入:' + fp
@@ -40,7 +75,7 @@ def gen_file_preview(file_name):
# pattern to match the text between <head> and </head>
pattern = re.compile(r'<head>.*?</head>', flags=re.DOTALL)
file_content = re.sub(pattern, '', file_content)
html, history = file_content.split('<hr color="blue"> \n\n raw chat context:\n')
html, history = file_content.split('<hr color="blue"> \n\n 对话数据 (无渲染):\n')
history = history.strip('<code>')
history = history.strip('</code>')
history = history.split("\n>>>")
@@ -51,21 +86,25 @@ def gen_file_preview(file_name):
def read_file_to_chat(chatbot, history, file_name):
with open(file_name, 'r', encoding='utf8') as f:
file_content = f.read()
# pattern to match the text between <head> and </head>
pattern = re.compile(r'<head>.*?</head>', flags=re.DOTALL)
file_content = re.sub(pattern, '', file_content)
html, history = file_content.split('<hr color="blue"> \n\n raw chat context:\n')
history = history.strip('<code>')
history = history.strip('</code>')
history = history.split("\n>>>")
history = list(filter(lambda x:x!="", history))
html = html.split('<hr color="red"> \n\n')
html = list(filter(lambda x:x!="", html))
from bs4 import BeautifulSoup
soup = BeautifulSoup(file_content, 'lxml')
# 提取QaBox信息
chatbot.clear()
for i, h in enumerate(html):
i_say, gpt_say = h.split('<hr style="border-top: dotted 3px #ccc;">')
chatbot.append([i_say, gpt_say])
chatbot.append([f"存档文件详情?", f"[Local Message] 载入对话{len(html)}条,上下文{len(history)}条。"])
qa_box_list = []
qa_boxes = soup.find_all("div", class_="QaBox")
for box in qa_boxes:
question = box.find("div", class_="Question").get_text(strip=False)
answer = box.find("div", class_="Answer").get_text(strip=False)
qa_box_list.append({"Question": question, "Answer": answer})
chatbot.append([question, answer])
# 提取historyBox信息
history_box_list = []
history_boxes = soup.find_all("div", class_="historyBox")
for box in history_boxes:
entry = box.find("div", class_="entry").get_text(strip=False)
history_box_list.append(entry)
history = history_box_list
chatbot.append([None, f"[Local Message] 载入对话{len(qa_box_list)}条,上下文{len(history)}条。"])
return chatbot, history
@CatchException
@@ -79,11 +118,42 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
system_prompt 给gpt的静默提醒
user_request 当前用户的请求信息IP地址等
"""
file_name = plugin_kwargs.get("file_name", None)
if (file_name is not None) and (file_name != "") and (not file_name.endswith('.html')): file_name += '.html'
else: file_name = None
chatbot.append(("保存当前对话",
f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。"))
chatbot.append((None, f"[Local Message] {write_chat_to_file(chatbot, history, file_name)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
class Conversation_To_File_Wrap(GptAcademicPluginTemplate):
def __init__(self):
"""
请注意`execute`会执行在不同的线程中因此您在定义和使用类变量时应当慎之又慎
"""
pass
def define_arg_selection_menu(self):
"""
定义插件的二级选项菜单
第一个参数名称`file_name`参数`type`声明这是一个文本框文本框上方显示`title`文本框内部显示`description``default_value`为默认值
"""
gui_definition = {
"file_name": ArgProperty(title="保存文件名", description="输入对话存档文件名,留空则使用时间作为文件名", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
}
return gui_definition
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
执行插件
"""
yield from 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
def hide_cwd(str):
import os
current_path = os.getcwd()
@@ -147,6 +217,4 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
os.remove(f)
chatbot.append([f"删除所有历史对话文件", f"已删除<br/>{local_history}"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return

crazy_functions/Image_Generate.py
View file

@@ -108,7 +108,7 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
return
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 使用前请切换模型到GPT系列。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
resolution = plugin_kwargs.get("advanced_arg", '1024x1024')
@@ -129,7 +129,7 @@ def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
return
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 使用前请切换模型到GPT系列。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
resolution_arg = plugin_kwargs.get("advanced_arg", '1024x1024-standard-vivid').lower()
@@ -166,7 +166,7 @@ class ImageEditState(GptAcademicState):
return confirm, file
def lock_plugin(self, chatbot):
chatbot._cookies['lock_plugin'] = 'crazy_functions.图片生成->图片修改_DALLE2'
chatbot._cookies['lock_plugin'] = 'crazy_functions.Image_Generate->图片修改_DALLE2'
self.dump_state(chatbot)
def unlock_plugin(self, chatbot):

crazy_functions/Image_Generate_Wrap.py
View file

@@ -0,0 +1,56 @@
from toolbox import get_conf, update_ui
from crazy_functions.Image_Generate import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
class ImageGen_Wrap(GptAcademicPluginTemplate):
def __init__(self):
"""
请注意`execute`会执行在不同的线程中,因此您在定义和使用类变量时,应当慎之又慎!
"""
pass
def define_arg_selection_menu(self):
"""
定义插件的二级选项菜单
第一个参数,名称`main_input`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description`,`default_value`为默认值;
第二个参数,名称`advanced_arg`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description`,`default_value`为默认值;
"""
gui_definition = {
"main_input":
ArgProperty(title="输入图片描述", description="需要生成图像的文本描述,尽量使用英文", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
"model_name":
ArgProperty(title="模型", options=["DALLE2", "DALLE3"], default_value="DALLE3", description="", type="dropdown").model_dump_json(),
"resolution":
ArgProperty(title="分辨率", options=["256x256(限DALLE2)", "512x512(限DALLE2)", "1024x1024", "1792x1024(限DALLE3)", "1024x1792(限DALLE3)"], default_value="1024x1024", description="", type="dropdown").model_dump_json(),
"quality (仅DALLE3生效)":
ArgProperty(title="质量", options=["standard", "hd"], default_value="standard", description="", type="dropdown").model_dump_json(),
"style (仅DALLE3生效)":
ArgProperty(title="风格", options=["vivid", "natural"], default_value="vivid", description="", type="dropdown").model_dump_json(),
}
return gui_definition
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
执行插件
"""
# 分辨率
resolution = plugin_kwargs["resolution"].replace("(限DALLE2)", "").replace("(限DALLE3)", "")
if plugin_kwargs["model_name"] == "DALLE2":
plugin_kwargs["advanced_arg"] = resolution
yield from 图片生成_DALLE2(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
elif plugin_kwargs["model_name"] == "DALLE3":
quality = plugin_kwargs["quality (仅DALLE3生效)"]
style = plugin_kwargs["style (仅DALLE3生效)"]
plugin_kwargs["advanced_arg"] = f"{resolution}-{quality}-{style}"
yield from 图片生成_DALLE3(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
else:
chatbot.append([None, "抱歉,找不到该模型"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
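
When the secondary menu is submitted, the framework fills plugin_kwargs with one value per declared argument (shape inferred from the execute method above); execute then repacks them into the legacy advanced_arg string. A hypothetical example:

    # Hypothetical plugin_kwargs as consumed by ImageGen_Wrap.execute above.
    plugin_kwargs = {
        "main_input": "a watercolor painting of a lighthouse",
        "model_name": "DALLE3",
        "resolution": "1792x1024(限DALLE3)",
        "quality (仅DALLE3生效)": "hd",
        "style (仅DALLE3生效)": "natural",
    }
    # execute() strips the "(限DALLE...)" suffix and builds advanced_arg = "1792x1024-hd-natural".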

crazy_functions/Internet_GPT.py
View file

@@ -0,0 +1,278 @@
import requests
import random
import time
import re
import json
from bs4 import BeautifulSoup
from functools import lru_cache
from itertools import zip_longest
from check_proxy import check_proxy
from toolbox import CatchException, update_ui, get_conf
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
from request_llms.bridge_all import model_info
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.prompts.internet import SearchOptimizerPrompt, SearchAcademicOptimizerPrompt
def search_optimizer(
query,
proxies,
history,
llm_kwargs,
optimizer=1,
categories="general",
searxng_url=None,
engines=None,
):
# ------------- < 第1步尝试进行搜索优化 > -------------
# * 增强优化,会尝试结合历史记录进行搜索优化
if optimizer == 2:
his = " "
if len(history) == 0:
pass
else:
for i, h in enumerate(history):
if i % 2 == 0:
his += f"Q: {h}\n"
else:
his += f"A: {h}\n"
if categories == "general":
sys_prompt = SearchOptimizerPrompt.format(query=query, history=his, num=4)
elif categories == "science":
sys_prompt = SearchAcademicOptimizerPrompt.format(query=query, history=his, num=4)
else:
his = " "
if categories == "general":
sys_prompt = SearchOptimizerPrompt.format(query=query, history=his, num=3)
elif categories == "science":
sys_prompt = SearchAcademicOptimizerPrompt.format(query=query, history=his, num=3)
mutable = ["", time.time(), ""]
llm_kwargs["temperature"] = 0.8
try:
querys_json = predict_no_ui_long_connection(
inputs=query,
llm_kwargs=llm_kwargs,
history=[],
sys_prompt=sys_prompt,
observe_window=mutable,
)
except Exception:
querys_json = "1234"
#* 尝试解码优化后的搜索结果
querys_json = re.sub(r"```json|```", "", querys_json)
try:
querys = json.loads(querys_json)
except Exception:
#* 如果解码失败,降低温度再试一次
try:
llm_kwargs["temperature"] = 0.4
querys_json = predict_no_ui_long_connection(
inputs=query,
llm_kwargs=llm_kwargs,
history=[],
sys_prompt=sys_prompt,
observe_window=mutable,
)
querys_json = re.sub(r"```json|```", "", querys_json)
querys = json.loads(querys_json)
except Exception:
#* 如果再次失败,直接返回原始问题
querys = [query]
links = []
success = 0
Exceptions = ""
for q in querys:
try:
link = searxng_request(q, proxies, categories, searxng_url, engines=engines)
if len(link) > 0:
links.append(link[:-5])
success += 1
except Exception as e:
Exceptions = e  # bind the caught exception so it can be reported below
if success == 0:
raise ValueError(f"在线搜索失败!\n{Exceptions}")
# * 清洗搜索结果,依次放入每组第一,第二个搜索结果,并清洗重复的搜索结果
seen_links = set()
result = []
    for group in zip_longest(*links, fillvalue=None):
        for item in group:
if item is not None:
link = item["link"]
if link not in seen_links:
seen_links.add(link)
result.append(item)
return result
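
上面的 zip_longest 交错合并与去重逻辑,可用下面的极简示例理解(links 的内容为假设数据):各查询的第一条结果先于任何查询的第二条结果进入最终列表,且重复链接只保留一次:

```python
from itertools import zip_longest

# 两组假设的搜索结果,"a" 为重复链接
links = [
    [{"link": "a"}, {"link": "b"}],
    [{"link": "a"}, {"link": "c"}, {"link": "d"}],
]
seen, merged = set(), []
for group in zip_longest(*links, fillvalue=None):  # 轮转交错各组结果
    for item in group:
        if item is not None and item["link"] not in seen:
            seen.add(item["link"])
            merged.append(item)
print([item["link"] for item in merged])  # 输出: ['a', 'b', 'c', 'd']
```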
@lru_cache
def get_auth_ip():
ip = check_proxy(None, return_ip=True)
if ip is None:
return '114.114.114.' + str(random.randint(1, 10))
return ip
def searxng_request(query, proxies, categories='general', searxng_url=None, engines=None):
if searxng_url is None:
url = get_conf("SEARXNG_URL")
else:
url = searxng_url
if engines == "Mixed":
engines = None
if categories == 'general':
params = {
'q': query, # 搜索查询
'format': 'json', # 输出格式为JSON
'language': 'zh', # 搜索语言
'engines': engines,
}
elif categories == 'science':
params = {
'q': query, # 搜索查询
'format': 'json', # 输出格式为JSON
'language': 'zh', # 搜索语言
'categories': 'science'
}
else:
raise ValueError('不支持的检索类型')
headers = {
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'X-Forwarded-For': get_auth_ip(),
'X-Real-IP': get_auth_ip()
}
results = []
response = requests.post(url, params=params, headers=headers, proxies=proxies, timeout=30)
if response.status_code == 200:
json_result = response.json()
for result in json_result['results']:
item = {
"title": result.get("title", ""),
"source": result.get("engines", "unknown"),
"content": result.get("content", ""),
"link": result["url"],
}
results.append(item)
return results
else:
if response.status_code == 429:
raise ValueError("Searxng在线搜索服务当前使用人数太多,请稍后。")
else:
raise ValueError("在线搜索失败,状态码: " + str(response.status_code) + '\t' + response.content.decode('utf-8'))
def scrape_text(url, proxies) -> str:
"""Scrape text from a webpage
Args:
url (str): The URL to scrape text from
Returns:
str: The scraped text
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
'Content-Type': 'text/plain',
}
try:
response = requests.get(url, headers=headers, proxies=proxies, timeout=8)
if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding
except:
return "无法连接到该网页"
soup = BeautifulSoup(response.text, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = "\n".join(chunk for chunk in chunks if chunk)
return text
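
以下是对上面两个函数的一个假设性用法示意(需要一个可访问的Searxng服务与外网连接;searxng_url 与查询词均为虚构示例):

```python
# 假设性用法:先检索,再抓取前几条结果的正文
proxies = None  # 或形如 {"http": "...", "https": "..."} 的代理配置
hits = searxng_request("transformer 注意力机制", proxies, categories="general",
                       searxng_url="https://searx.example.org/search")  # 虚构的服务地址
for hit in hits[:3]:
    text = scrape_text(hit["link"], proxies)
    print(hit["title"], "->", text[:80])
```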
@CatchException
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
optimizer_history = history[:-8]
history = [] # 清空历史,以免输入溢出
chatbot.append((f"请结合互联网信息回答以下问题:{txt}", "检索中..."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    # ------------- < 第1步:爬取搜索引擎的结果 > -------------
from toolbox import get_conf
proxies = get_conf('proxies')
categories = plugin_kwargs.get('categories', 'general')
searxng_url = plugin_kwargs.get('searxng_url', None)
engines = plugin_kwargs.get('engine', None)
optimizer = plugin_kwargs.get('optimizer', "关闭")
if optimizer == "关闭":
urls = searxng_request(txt, proxies, categories, searxng_url, engines=engines)
else:
urls = search_optimizer(txt, proxies, optimizer_history, llm_kwargs, optimizer, categories, searxng_url, engines)
history = []
if len(urls) == 0:
chatbot.append((f"结论:{txt}",
"[Local Message] 受到限制,无法从searxng获取信息请尝试更换搜索引擎。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
    # ------------- < 第2步:依次访问网页 > -------------
max_search_result = 5 # 最多收纳多少个网页的结果
if optimizer == "开启(增强)":
max_search_result = 8
chatbot.append(["联网检索中 ...", None])
for index, url in enumerate(urls[:max_search_result]):
res = scrape_text(url['link'], proxies)
prefix = f"{index}份搜索结果 [源自{url['source'][0]}搜索] {url['title'][:25]}"
history.extend([prefix, res])
res_squeeze = res.replace('\n', '...')
chatbot[-1] = [prefix + "\n\n" + res_squeeze[:500] + "......", None]
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    # ------------- < 第3步:ChatGPT综合 > -------------
if (optimizer != "开启(增强)"):
i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}"
i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token
inputs=i_say,
history=history,
max_token_limit=min(model_info[llm_kwargs['llm_model']]['max_token']*3//4, 8192)
)
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。"
)
chatbot[-1] = (i_say, gpt_say)
history.append(i_say);history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
#* 或者使用搜索优化器,这样可以保证后续问答能读取到有效的历史记录
else:
i_say = f"从以上搜索结果中抽取与问题:{txt} 相关的信息:"
i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token
inputs=i_say,
history=history,
max_token_limit=min(model_info[llm_kwargs['llm_model']]['max_token']*3//4, 8192)
)
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的三个搜索结果进行总结"
)
chatbot[-1] = (i_say, gpt_say)
history = []
history.append(i_say);history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
        # ------------- < 第4步:根据综合回答问题 > -------------
i_say = f"请根据以上搜索结果回答问题:{txt}"
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt="请根据给定的若干条搜索结果回答问题"
)
chatbot[-1] = (i_say, gpt_say)
history.append(i_say);history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history)

查看文件

@@ -0,0 +1,45 @@
from toolbox import get_conf
from crazy_functions.Internet_GPT import 连接网络回答问题
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
class NetworkGPT_Wrap(GptAcademicPluginTemplate):
def __init__(self):
"""
请注意`execute`会执行在不同的线程中,因此您在定义和使用类变量时,应当慎之又慎!
"""
pass
def define_arg_selection_menu(self):
"""
定义插件的二级选项菜单
        参数`main_input`为文本框,自动同步输入框中待检索的问题;
        参数`categories`、`engine`、`optimizer`为下拉菜单,分别用于选择搜索分类、搜索引擎与搜索优化模式;
        参数`searxng_url`为文本框,用于指定Searxng服务地址;
"""
gui_definition = {
"main_input":
ArgProperty(title="输入问题", description="待通过互联网检索的问题,会自动读取输入框内容", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
"categories":
ArgProperty(title="搜索分类", options=["网页", "学术论文"], default_value="网页", description="", type="dropdown").model_dump_json(),
"engine":
ArgProperty(title="选择搜索引擎", options=["Mixed", "bing", "google", "duckduckgo"], default_value="google", description="", type="dropdown").model_dump_json(),
"optimizer":
ArgProperty(title="搜索优化", options=["关闭", "开启", "开启(增强)"], default_value="关闭", description="是否使用搜索增强。注意这可能会消耗较多token", type="dropdown").model_dump_json(),
"searxng_url":
ArgProperty(title="Searxng服务地址", description="输入Searxng的地址", default_value=get_conf("SEARXNG_URL"), type="string").model_dump_json(), # 主输入,自动从输入框同步
}
return gui_definition
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
执行插件
"""
if plugin_kwargs["categories"] == "网页": plugin_kwargs["categories"] = "general"
if plugin_kwargs["categories"] == "学术论文": plugin_kwargs["categories"] = "science"
yield from 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
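
作为理解该插件模板的参考,下面给出一个假设的最小插件(类名与参数均为虚构,仅为示意),展示二级菜单里定义的下拉项如何经 plugin_kwargs 传入 execute:

```python
from toolbox import update_ui
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty

class Echo_Wrap(GptAcademicPluginTemplate):  # 虚构的演示插件
    def define_arg_selection_menu(self):
        return {
            "main_input":
                ArgProperty(title="输入文本", description="将被原样回显的文本", default_value="", type="string").model_dump_json(),
            "repeat":
                ArgProperty(title="重复次数", options=["1", "2", "3"], default_value="1", description="", type="dropdown").model_dump_json(),
        }

    def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
        n = int(plugin_kwargs["repeat"])  # 下拉菜单的选择以字符串形式出现在plugin_kwargs中
        chatbot.append([txt, txt * n])
        yield from update_ui(chatbot=chatbot, history=history)
```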

查看文件

@@ -4,7 +4,7 @@ from functools import partial
import glob, os, requests, time, json, tarfile
pj = os.path.join
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
ARXIV_CACHE_DIR = get_conf("ARXIV_CACHE_DIR")
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
@@ -107,6 +107,10 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
except ValueError:
return False
if txt.startswith('https://arxiv.org/pdf/'):
arxiv_id = txt.split('/')[-1] # 2402.14207v2.pdf
txt = arxiv_id.split('v')[0] # 2402.14207
if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID
txt = 'https://arxiv.org/abs/' + txt.strip()
if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID
@@ -121,6 +125,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
time.sleep(1) # 刷新界面
url_ = txt # https://arxiv.org/abs/1707.06690
if not txt.startswith('https://arxiv.org/abs/'):
msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}"
yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
@@ -153,65 +158,72 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
return extract_dst, arxiv_id
def pdf2tex_project(pdf_file_path):
# Mathpix API credentials
app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
headers = {"app_id": app_id, "app_key": app_key}
def pdf2tex_project(pdf_file_path, plugin_kwargs):
if plugin_kwargs["method"] == "MATHPIX":
# Mathpix API credentials
app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
headers = {"app_id": app_id, "app_key": app_key}
# Step 1: Send PDF file for processing
options = {
"conversion_formats": {"tex.zip": True},
"math_inline_delimiters": ["$", "$"],
"rm_spaces": True
}
# Step 1: Send PDF file for processing
options = {
"conversion_formats": {"tex.zip": True},
"math_inline_delimiters": ["$", "$"],
"rm_spaces": True
}
response = requests.post(url="https://api.mathpix.com/v3/pdf",
headers=headers,
data={"options_json": json.dumps(options)},
files={"file": open(pdf_file_path, "rb")})
response = requests.post(url="https://api.mathpix.com/v3/pdf",
headers=headers,
data={"options_json": json.dumps(options)},
files={"file": open(pdf_file_path, "rb")})
if response.ok:
pdf_id = response.json()["pdf_id"]
print(f"PDF processing initiated. PDF ID: {pdf_id}")
if response.ok:
pdf_id = response.json()["pdf_id"]
print(f"PDF processing initiated. PDF ID: {pdf_id}")
# Step 2: Check processing status
while True:
conversion_response = requests.get(f"https://api.mathpix.com/v3/pdf/{pdf_id}", headers=headers)
conversion_data = conversion_response.json()
# Step 2: Check processing status
while True:
conversion_response = requests.get(f"https://api.mathpix.com/v3/pdf/{pdf_id}", headers=headers)
conversion_data = conversion_response.json()
if conversion_data["status"] == "completed":
print("PDF processing completed.")
break
elif conversion_data["status"] == "error":
print("Error occurred during processing.")
else:
print(f"Processing status: {conversion_data['status']}")
time.sleep(5) # wait for a few seconds before checking again
if conversion_data["status"] == "completed":
print("PDF processing completed.")
break
elif conversion_data["status"] == "error":
print("Error occurred during processing.")
else:
print(f"Processing status: {conversion_data['status']}")
time.sleep(5) # wait for a few seconds before checking again
# Step 3: Save results to local files
output_dir = os.path.join(os.path.dirname(pdf_file_path), 'mathpix_output')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Step 3: Save results to local files
output_dir = os.path.join(os.path.dirname(pdf_file_path), 'mathpix_output')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
url = f"https://api.mathpix.com/v3/pdf/{pdf_id}.tex"
response = requests.get(url, headers=headers)
file_name_wo_dot = '_'.join(os.path.basename(pdf_file_path).split('.')[:-1])
output_name = f"{file_name_wo_dot}.tex.zip"
output_path = os.path.join(output_dir, output_name)
with open(output_path, "wb") as output_file:
output_file.write(response.content)
print(f"tex.zip file saved at: {output_path}")
url = f"https://api.mathpix.com/v3/pdf/{pdf_id}.tex"
response = requests.get(url, headers=headers)
file_name_wo_dot = '_'.join(os.path.basename(pdf_file_path).split('.')[:-1])
output_name = f"{file_name_wo_dot}.tex.zip"
output_path = os.path.join(output_dir, output_name)
with open(output_path, "wb") as output_file:
output_file.write(response.content)
print(f"tex.zip file saved at: {output_path}")
import zipfile
unzip_dir = os.path.join(output_dir, file_name_wo_dot)
with zipfile.ZipFile(output_path, 'r') as zip_ref:
zip_ref.extractall(unzip_dir)
import zipfile
unzip_dir = os.path.join(output_dir, file_name_wo_dot)
with zipfile.ZipFile(output_path, 'r') as zip_ref:
zip_ref.extractall(unzip_dir)
return unzip_dir
else:
print(f"Error sending PDF for processing. Status code: {response.status_code}")
return None
else:
from crazy_functions.pdf_fns.parse_pdf_via_doc2x import 解析PDF_DOC2X_转Latex
unzip_dir = 解析PDF_DOC2X_转Latex(pdf_file_path)
return unzip_dir
else:
print(f"Error sending PDF for processing. Status code: {response.status_code}")
return None
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
@@ -221,7 +233,7 @@ def pdf2tex_project(pdf_file_path):
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
# <-------------- information about this plugin ------------->
chatbot.append(["函数插件功能?",
"对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
"对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# <-------------- more requirements ------------->
@@ -259,6 +271,8 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
project_folder = desend_to_extracted_folder_if_exist(project_folder)
# <-------------- move latex project away from temp folder ------------->
from shared_utils.fastapi_server import validate_path_safety
validate_path_safety(project_folder, chatbot.get_user())
project_folder = move_project(project_folder, arxiv_id=None)
# <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
@@ -282,7 +296,7 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
else:
chatbot.append((f"失败了",
'虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
'虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+Conversation_To_File进行反馈 ...'))
yield from update_ui(chatbot=chatbot, history=history);
time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
@@ -298,7 +312,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
# <-------------- information about this plugin ------------->
chatbot.append([
"函数插件功能?",
"对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
"对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# <-------------- more requirements ------------->
@@ -353,6 +367,8 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
project_folder = desend_to_extracted_folder_if_exist(project_folder)
# <-------------- move latex project away from temp folder ------------->
from shared_utils.fastapi_server import validate_path_safety
validate_path_safety(project_folder, chatbot.get_user())
project_folder = move_project(project_folder, arxiv_id)
# <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
@@ -392,7 +408,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
# <-------------- information about this plugin ------------->
chatbot.append([
"函数插件功能?",
"将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
"将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# <-------------- more requirements ------------->
@@ -432,107 +448,101 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"不支持同时处理多个pdf文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
if len(app_id) == 0 or len(app_key) == 0:
report_exception(chatbot, history, a="缺失 MATHPIX_APPID 和 MATHPIX_APPKEY。", b=f"请配置 MATHPIX_APPID 和 MATHPIX_APPKEY")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
hash_tag = map_file_to_sha256(file_manifest[0])
# <-------------- check repeated pdf ------------->
chatbot.append([f"检查PDF是否被重复上传", "正在检查..."])
yield from update_ui(chatbot=chatbot, history=history)
repeat, project_folder = check_repeat_upload(file_manifest[0], hash_tag)
except_flag = False
if repeat:
yield from update_ui_lastest_msg(f"发现重复上传,请查收结果(压缩包)...", chatbot=chatbot, history=history)
try:
trans_html_file = [f for f in glob.glob(f'{project_folder}/**/*.trans.html', recursive=True)][0]
promote_file_to_downloadzone(trans_html_file, rename_file=None, chatbot=chatbot)
translate_pdf = [f for f in glob.glob(f'{project_folder}/**/merge_translate_zh.pdf', recursive=True)][0]
promote_file_to_downloadzone(translate_pdf, rename_file=None, chatbot=chatbot)
comparison_pdf = [f for f in glob.glob(f'{project_folder}/**/comparison.pdf', recursive=True)][0]
promote_file_to_downloadzone(comparison_pdf, rename_file=None, chatbot=chatbot)
zip_res = zip_result(project_folder)
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
return True
except:
report_exception(chatbot, history, b=f"发现重复上传,但是无法找到相关文件")
yield from update_ui(chatbot=chatbot, history=history)
chatbot.append([f"没有相关文件", '尝试重新翻译PDF...'])
yield from update_ui(chatbot=chatbot, history=history)
except_flag = True
elif not repeat or except_flag:
yield from update_ui_lastest_msg(f"未发现重复上传", chatbot=chatbot, history=history)
# <-------------- convert pdf into tex ------------->
chatbot.append([f"解析项目: {txt}", "正在将PDF转换为tex项目,请耐心等待..."])
yield from update_ui(chatbot=chatbot, history=history)
project_folder = pdf2tex_project(file_manifest[0])
if project_folder is None:
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"PDF转换为tex项目失败")
yield from update_ui(chatbot=chatbot, history=history)
return False
# <-------------- translate latex file into Chinese ------------->
yield from update_ui_lastest_msg("正在tex项目将翻译为中文...", chatbot=chatbot, history=history)
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
if len(file_manifest) == 0:
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
if plugin_kwargs.get("method", "") == 'MATHPIX':
app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
if len(app_id) == 0 or len(app_key) == 0:
report_exception(chatbot, history, a="缺失 MATHPIX_APPID 和 MATHPIX_APPKEY。", b=f"请配置 MATHPIX_APPID 和 MATHPIX_APPKEY")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
if plugin_kwargs.get("method", "") == 'DOC2X':
app_id, app_key = "", ""
DOC2X_API_KEY = get_conf('DOC2X_API_KEY')
if len(DOC2X_API_KEY) == 0:
report_exception(chatbot, history, a="缺失 DOC2X_API_KEY。", b=f"请配置 DOC2X_API_KEY")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# <-------------- if is a zip/tar file ------------->
project_folder = desend_to_extracted_folder_if_exist(project_folder)
hash_tag = map_file_to_sha256(file_manifest[0])
# <-------------- move latex project away from temp folder ------------->
project_folder = move_project(project_folder)
# # <-------------- check repeated pdf ------------->
# chatbot.append([f"检查PDF是否被重复上传", "正在检查..."])
# yield from update_ui(chatbot=chatbot, history=history)
# repeat, project_folder = check_repeat_upload(file_manifest[0], hash_tag)
# <-------------- set a hash tag for repeat-checking ------------->
with open(pj(project_folder, hash_tag + '.tag'), 'w') as f:
f.write(hash_tag)
f.close()
# if repeat:
# yield from update_ui_lastest_msg(f"发现重复上传,请查收结果(压缩包)...", chatbot=chatbot, history=history)
# try:
# translate_pdf = [f for f in glob.glob(f'{project_folder}/**/merge_translate_zh.pdf', recursive=True)][0]
# promote_file_to_downloadzone(translate_pdf, rename_file=None, chatbot=chatbot)
# comparison_pdf = [f for f in glob.glob(f'{project_folder}/**/comparison.pdf', recursive=True)][0]
# promote_file_to_downloadzone(comparison_pdf, rename_file=None, chatbot=chatbot)
# zip_res = zip_result(project_folder)
# promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
# return
# except:
# report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现重复上传,但是无法找到相关文件")
# yield from update_ui(chatbot=chatbot, history=history)
# else:
# yield from update_ui_lastest_msg(f"未发现重复上传", chatbot=chatbot, history=history)
# <-------------- convert pdf into tex ------------->
chatbot.append([f"解析项目: {txt}", "正在将PDF转换为tex项目,请耐心等待..."])
yield from update_ui(chatbot=chatbot, history=history)
project_folder = pdf2tex_project(file_manifest[0], plugin_kwargs)
if project_folder is None:
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"PDF转换为tex项目失败")
yield from update_ui(chatbot=chatbot, history=history)
return False
# <-------------- translate latex file into Chinese ------------->
yield from update_ui_lastest_msg("正在tex项目将翻译为中文...", chatbot=chatbot, history=history)
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
if len(file_manifest) == 0:
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# <-------------- if is a zip/tar file ------------->
project_folder = desend_to_extracted_folder_if_exist(project_folder)
# <-------------- move latex project away from temp folder ------------->
from shared_utils.fastapi_server import validate_path_safety
validate_path_safety(project_folder, chatbot.get_user())
project_folder = move_project(project_folder)
# <-------------- set a hash tag for repeat-checking ------------->
with open(pj(project_folder, hash_tag + '.tag'), 'w') as f:
f.write(hash_tag)
f.close()
# <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
chatbot, history, system_prompt, mode='translate_zh',
switch_prompt=_switch_prompt_)
# <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
chatbot, history, system_prompt, mode='translate_zh',
switch_prompt=_switch_prompt_)
# <-------------- compile PDF ------------->
yield from update_ui_lastest_msg("正在将翻译好的项目tex项目编译为PDF...", chatbot=chatbot, history=history)
success = yield from 编译Latex(chatbot, history, main_file_original='merge',
main_file_modified='merge_translate_zh', mode='translate_zh',
work_folder_original=project_folder, work_folder_modified=project_folder,
work_folder=project_folder)
# <-------------- compile PDF ------------->
yield from update_ui_lastest_msg("正在将翻译好的项目tex项目编译为PDF...", chatbot=chatbot, history=history)
success = yield from 编译Latex(chatbot, history, main_file_original='merge',
main_file_modified='merge_translate_zh', mode='translate_zh',
work_folder_original=project_folder, work_folder_modified=project_folder,
work_folder=project_folder)
# <-------------- zip PDF ------------->
zip_res = zip_result(project_folder)
if success:
chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
yield from update_ui(chatbot=chatbot, history=history);
time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
else:
chatbot.append((f"失败了",
'虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体见Github wiki ...'))
yield from update_ui(chatbot=chatbot, history=history);
time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
# <-------------- zip PDF ------------->
zip_res = zip_result(project_folder)
if success:
chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
yield from update_ui(chatbot=chatbot, history=history);
time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
else:
chatbot.append((f"失败了",
'虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体见Github wiki ...'))
yield from update_ui(chatbot=chatbot, history=history);
time.sleep(1) # 刷新界面
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
# <-------------- we are done ------------->
return success
# <-------------- we are done ------------->
return success

查看文件

@@ -0,0 +1,78 @@
from crazy_functions.Latex_Function import Latex翻译中文并重新编译PDF, PDF翻译中文并重新编译PDF
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
class Arxiv_Localize(GptAcademicPluginTemplate):
def __init__(self):
"""
请注意`execute`会执行在不同的线程中,因此您在定义和使用类变量时,应当慎之又慎!
"""
pass
def define_arg_selection_menu(self):
"""
定义插件的二级选项菜单
第一个参数,名称`main_input`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description`,`default_value`为默认值;
第二个参数,名称`advanced_arg`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description`,`default_value`为默认值;
第三个参数,名称`allow_cache`,参数`type`声明这是一个下拉菜单,下拉菜单上方显示`title`+`description`,下拉菜单的选项为`options`,`default_value`为下拉菜单默认值;
"""
gui_definition = {
"main_input":
ArgProperty(title="ArxivID", description="输入Arxiv的ID或者网址", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
"advanced_arg":
ArgProperty(title="额外的翻译提示词",
description=r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
default_value="", type="string").model_dump_json(), # 高级参数输入区,自动同步
"allow_cache":
ArgProperty(title="是否允许从缓存中调取结果", options=["允许缓存", "从头执行"], default_value="允许缓存", description="", type="dropdown").model_dump_json(),
}
return gui_definition
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
执行插件
"""
allow_cache = plugin_kwargs["allow_cache"]
advanced_arg = plugin_kwargs["advanced_arg"]
if allow_cache == "从头执行": plugin_kwargs["advanced_arg"] = "--no-cache " + plugin_kwargs["advanced_arg"]
yield from Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
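
上面对 allow_cache 的处理可用一段跟踪示例说明(字典取值为假设值):选择"从头执行"时,仅在高级参数前加上 --no-cache 再透传给下游:

```python
# 假设用户选择了"从头执行",且已填写高级翻译提示词
plugin_kwargs = {"allow_cache": "从头执行", "advanced_arg": "term hints"}
if plugin_kwargs["allow_cache"] == "从头执行":
    plugin_kwargs["advanced_arg"] = "--no-cache " + plugin_kwargs["advanced_arg"]
print(plugin_kwargs["advanced_arg"])  # 输出: --no-cache term hints
```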
class PDF_Localize(GptAcademicPluginTemplate):
def __init__(self):
"""
请注意`execute`会执行在不同的线程中,因此您在定义和使用类变量时,应当慎之又慎!
"""
pass
def define_arg_selection_menu(self):
"""
定义插件的二级选项菜单
"""
gui_definition = {
"main_input":
ArgProperty(title="PDF文件路径", description="未指定路径,请上传文件后,再点击该插件", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
"advanced_arg":
ArgProperty(title="额外的翻译提示词",
description=r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
default_value="", type="string").model_dump_json(), # 高级参数输入区,自动同步
"method":
ArgProperty(title="采用哪种方法执行转换", options=["MATHPIX", "DOC2X"], default_value="DOC2X", description="", type="dropdown").model_dump_json(),
}
return gui_definition
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
执行插件
"""
yield from PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)

查看文件

@@ -1,5 +1,5 @@
import glob, time, os, re, logging
from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion
import glob, shutil, os, re, logging
from toolbox import update_ui, trimmed_format_exc, gen_time_str
from toolbox import CatchException, report_exception, get_log_folder
from toolbox import write_history_to_file, promote_file_to_downloadzone
fast_debug = False
@@ -18,7 +18,7 @@ class PaperFileGroup():
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
self.get_token_num = get_token_num
def run_file_split(self, max_token_limit=1900):
def run_file_split(self, max_token_limit=2048):
"""
将长文本分离开来
"""
@@ -64,25 +64,25 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
pfg.file_contents.append(file_content)
# <-------- 拆分过长的Markdown文件 ---------->
pfg.run_file_split(max_token_limit=1500)
pfg.run_file_split(max_token_limit=2048)
n_split = len(pfg.sp_file_contents)
# <-------- 多线程翻译开始 ---------->
if language == 'en->zh':
inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" +
inputs_array = ["This is a Markdown file, translate it into Chinese, do NOT modify any existing Markdown commands, do NOT use code wrapper (```), ONLY answer me with translated results:" +
f"\n\n{frag}" for frag in pfg.sp_file_contents]
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
sys_prompt_array = ["You are a professional academic paper translator." + plugin_kwargs.get("additional_prompt", "") for _ in range(n_split)]
elif language == 'zh->en':
inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" +
inputs_array = [f"This is a Markdown file, translate it into English, do NOT modify any existing Markdown commands, do NOT use code wrapper (```), ONLY answer me with translated results:" +
f"\n\n{frag}" for frag in pfg.sp_file_contents]
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
sys_prompt_array = ["You are a professional academic paper translator." + plugin_kwargs.get("additional_prompt", "") for _ in range(n_split)]
else:
inputs_array = [f"This is a Markdown file, translate it into {language}, do not modify any existing Markdown commands, only answer me with translated results:" +
inputs_array = [f"This is a Markdown file, translate it into {language}, do NOT modify any existing Markdown commands, do NOT use code wrapper (```), ONLY answer me with translated results:" +
f"\n\n{frag}" for frag in pfg.sp_file_contents]
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
sys_prompt_array = ["You are a professional academic paper translator." + plugin_kwargs.get("additional_prompt", "") for _ in range(n_split)]
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array=inputs_array,
@@ -99,7 +99,12 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]):
pfg.sp_file_result.append(gpt_say)
pfg.merge_result()
pfg.write_result(language)
output_file_arr = pfg.write_result(language)
for output_file in output_file_arr:
promote_file_to_downloadzone(output_file, chatbot=chatbot)
if 'markdown_expected_output_path' in plugin_kwargs:
expected_f_name = plugin_kwargs['markdown_expected_output_path']
shutil.copyfile(output_file, expected_f_name)
except:
logging.error(trimmed_format_exc())
@@ -159,7 +164,6 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
"函数插件功能?",
"对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
disable_auto_promotion(chatbot)
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:
@@ -199,7 +203,6 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
"函数插件功能?",
"对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
disable_auto_promotion(chatbot)
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:
@@ -232,7 +235,6 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history,
"函数插件功能?",
"对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
disable_auto_promotion(chatbot)
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:

查看文件

@@ -0,0 +1,83 @@
from toolbox import CatchException, check_packages, get_conf
from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion
from toolbox import trimmed_format_exc_markdown
from crazy_functions.crazy_utils import get_files_from_everything
from crazy_functions.pdf_fns.parse_pdf import get_avail_grobid_url
from crazy_functions.pdf_fns.parse_pdf_via_doc2x import 解析PDF_基于DOC2X
from crazy_functions.pdf_fns.parse_pdf_legacy import 解析PDF_简单拆解
from crazy_functions.pdf_fns.parse_pdf_grobid import 解析PDF_基于GROBID
from shared_utils.colorful import *
@CatchException
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
disable_auto_promotion(chatbot)
# 基本信息:功能、贡献者
chatbot.append([None, "插件功能批量翻译PDF文档。函数插件贡献者: Binary-Husky"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:
check_packages(["fitz", "tiktoken", "scipdf"])
except:
chatbot.append([None, f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# 清空历史,以免输入溢出
history = []
success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf')
# 检测输入参数,如没有给定输入参数,直接退出
if (not success) and txt == "": txt = '空空如也的输入栏。提示请先上传文件把PDF文件拖入对话'
# 如果没找到任何文件
if len(file_manifest) == 0:
chatbot.append([None, f"找不到任何.pdf拓展名的文件: {txt}"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# 开始正式执行任务
method = plugin_kwargs.get("pdf_parse_method", None)
if method == "DOC2X":
# ------- 第一种方法,效果最好,但是需要DOC2X服务 -------
DOC2X_API_KEY = get_conf("DOC2X_API_KEY")
if len(DOC2X_API_KEY) != 0:
try:
yield from 解析PDF_基于DOC2X(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, DOC2X_API_KEY, user_request)
return
except:
chatbot.append([None, f"DOC2X服务不可用,现在将执行效果稍差的旧版代码。{trimmed_format_exc_markdown()}"])
yield from update_ui(chatbot=chatbot, history=history)
if method == "GROBID":
# ------- 第二种方法,效果次优 -------
grobid_url = get_avail_grobid_url()
if grobid_url is not None:
yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
return
if method == "ClASSIC":
# ------- 第三种方法,早期代码,效果不理想 -------
yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
yield from 解析PDF_简单拆解(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
return
if method is None:
# ------- 以上三种方法都试一遍 -------
DOC2X_API_KEY = get_conf("DOC2X_API_KEY")
if len(DOC2X_API_KEY) != 0:
try:
yield from 解析PDF_基于DOC2X(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, DOC2X_API_KEY, user_request)
return
except:
chatbot.append([None, f"DOC2X服务不可用,正在尝试GROBID。{trimmed_format_exc_markdown()}"])
yield from update_ui(chatbot=chatbot, history=history)
grobid_url = get_avail_grobid_url()
if grobid_url is not None:
yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
return
yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
yield from 解析PDF_简单拆解(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
return
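
上面的三级回退流程,其骨架可抽象为如下示意(三个解析函数均为虚构占位,并非项目真实API,仅用于说明"逐个尝试、失败降级"的控制流):

```python
# 虚构的占位解析器,模拟前两种服务不可用的情形
def parse_doc2x(path): raise RuntimeError("DOC2X unavailable")
def parse_grobid(path): raise RuntimeError("GROBID unavailable")
def parse_classic(path): return f"plain-text split of {path}"

def parse_with_fallback(pdf_path):
    # 依次尝试各解析方法,任一成功即返回,全部失败才报错
    for parser in (parse_doc2x, parse_grobid, parse_classic):
        try:
            return parser(pdf_path)
        except Exception:
            continue
    raise RuntimeError("所有PDF解析方法均失败")

print(parse_with_fallback("paper.pdf"))  # 降级到最后的旧版解析
```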

查看文件

@@ -0,0 +1,33 @@
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
from .PDF_Translate import 批量翻译PDF文档
class PDF_Tran(GptAcademicPluginTemplate):
def __init__(self):
"""
请注意`execute`会执行在不同的线程中,因此您在定义和使用类变量时,应当慎之又慎!
"""
pass
def define_arg_selection_menu(self):
"""
定义插件的二级选项菜单
"""
gui_definition = {
"main_input":
ArgProperty(title="PDF文件路径", description="未指定路径,请上传文件后,再点击该插件", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
"additional_prompt":
ArgProperty(title="额外提示词", description="例如:对专有名词、翻译语气等方面的要求", default_value="", type="string").model_dump_json(), # 高级参数输入区,自动同步
"pdf_parse_method":
ArgProperty(title="PDF解析方法", options=["DOC2X", "GROBID", "ClASSIC"], description="", default_value="GROBID", type="dropdown").model_dump_json(),
}
return gui_definition
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
执行插件
"""
main_input = plugin_kwargs["main_input"]
additional_prompt = plugin_kwargs["additional_prompt"]
pdf_parse_method = plugin_kwargs["pdf_parse_method"]
yield from 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)

查看文件

@@ -1,12 +1,12 @@
from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion
from toolbox import update_ui, promote_file_to_downloadzone
from toolbox import CatchException, report_exception, write_history_to_file
from .crazy_utils import input_clipping
from shared_utils.fastapi_server import validate_path_safety
from crazy_functions.crazy_utils import input_clipping
def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
import os, copy
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
disable_auto_promotion(chatbot=chatbot)
summary_batch_isolation = True
inputs_array = []
@@ -23,7 +23,7 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
file_content = f.read()
prefix = "接下来请你逐文件分析下面的工程" if index==0 else ""
i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {fp}'
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {fp}'
# 装载请求内容
inputs_array.append(i_say)
inputs_show_user_array.append(i_say_show_user)
@@ -128,6 +128,7 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
@@ -146,6 +147,7 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
@@ -164,6 +166,7 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
@@ -184,6 +187,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
@@ -206,6 +210,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
@@ -228,6 +233,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
@@ -257,6 +263,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
@@ -278,6 +285,7 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
@@ -298,6 +306,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
@@ -320,6 +329,7 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
@@ -357,6 +367,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
import glob, os, re
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")

查看文件

@@ -0,0 +1,138 @@
import os, copy, time
from toolbox import CatchException, report_exception, update_ui, zip_result, promote_file_to_downloadzone, update_ui_lastest_msg, get_conf, generate_file_link
from shared_utils.fastapi_server import validate_path_safety
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.agent_fns.python_comment_agent import PythonCodeComment
from crazy_functions.diagram_fns.file_tree import FileNode
from shared_utils.advanced_markdown_format import markdown_convertion_for_file
def 注释源代码(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
summary_batch_isolation = True
inputs_array = []
inputs_show_user_array = []
history_array = []
sys_prompt_array = []
assert len(file_manifest) <= 512, "源文件太多超过512个, 请缩减输入文件的数量。或者,您也可以选择删除此行警告,并修改代码拆分file_manifest列表,从而实现分批次处理。"
# 建立文件树
file_tree_struct = FileNode("root", build_manifest=True)
for file_path in file_manifest:
file_tree_struct.add_file(file_path, file_path)
# <第一步,逐个文件分析,多线程>
for index, fp in enumerate(file_manifest):
# 读取文件
with open(fp, 'r', encoding='utf-8', errors='replace') as f:
file_content = f.read()
prefix = ""
i_say = prefix + f'Please conclude the following source code at {os.path.relpath(fp, project_folder)} with only one sentence, the code is:\n```{file_content}```'
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请用一句话对下面的程序文件做一个整体概述: {fp}'
# 装载请求内容
MAX_TOKEN_SINGLE_FILE = 2560
i_say, _ = input_clipping(inputs=i_say, history=[], max_token_limit=MAX_TOKEN_SINGLE_FILE)
inputs_array.append(i_say)
inputs_show_user_array.append(i_say_show_user)
history_array.append([])
sys_prompt_array.append("You are a software architecture analyst analyzing a source code project. Do not dig into details, tell me what the code is doing in general. Your answer must be short, simple and clear.")
# 文件读取完成,对每一个源代码文件,生成一个请求线程,发送到大模型进行分析
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array = inputs_array,
inputs_show_user_array = inputs_show_user_array,
history_array = history_array,
sys_prompt_array = sys_prompt_array,
llm_kwargs = llm_kwargs,
chatbot = chatbot,
show_user_at_complete = True
)
# <第二步,逐个文件分析,生成带注释文件>
from concurrent.futures import ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=get_conf('DEFAULT_WORKER_NUM'))
def _task_multi_threading(i_say, gpt_say, fp, file_tree_struct):
pcc = PythonCodeComment(llm_kwargs, language='English')
pcc.read_file(path=fp, brief=gpt_say)
revised_path, revised_content = pcc.begin_comment_source_code(None, None)
file_tree_struct.manifest[fp].revised_path = revised_path
file_tree_struct.manifest[fp].revised_content = revised_content
# <将结果写回源文件>
with open(fp, 'w', encoding='utf-8') as f:
f.write(file_tree_struct.manifest[fp].revised_content)
# <生成对比html>
with open("crazy_functions/agent_fns/python_comment_compare.html", 'r', encoding='utf-8') as f:
html_template = f.read()
        wrap = lambda x: "```python\n\n" + x + "\n\n```"
from themes.theme import advanced_css
html_template = html_template.replace("ADVANCED_CSS", advanced_css)
html_template = html_template.replace("REPLACE_CODE_FILE_LEFT", pcc.get_markdown_block_in_html(markdown_convertion_for_file(warp(pcc.original_content))))
html_template = html_template.replace("REPLACE_CODE_FILE_RIGHT", pcc.get_markdown_block_in_html(markdown_convertion_for_file(warp(revised_content))))
compare_html_path = fp + '.compare.html'
file_tree_struct.manifest[fp].compare_html = compare_html_path
with open(compare_html_path, 'w', encoding='utf-8') as f:
f.write(html_template)
print('done 1')
chatbot.append([None, f"正在处理:"])
futures = []
for i_say, gpt_say, fp in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], file_manifest):
future = executor.submit(_task_multi_threading, i_say, gpt_say, fp, file_tree_struct)
futures.append(future)
cnt = 0
while True:
cnt += 1
time.sleep(3)
worker_done = [h.done() for h in futures]
remain = len(worker_done) - sum(worker_done)
# <展示已经完成的部分>
preview_html_list = []
for done, fp in zip(worker_done, file_manifest):
if not done: continue
preview_html_list.append(file_tree_struct.manifest[fp].compare_html)
file_links = generate_file_link(preview_html_list)
        yield from update_ui_lastest_msg(
            f"剩余源文件数量: {remain}.\n\n" +
            f"已完成的文件: {sum(worker_done)}.\n\n" +
            file_links +
            "\n\n" +
            ''.join(['.'] * (cnt % 10 + 1)),
            chatbot=chatbot, history=history, delay=0)
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
if all(worker_done):
executor.shutdown()
break
# <第四步,压缩结果>
zip_res = zip_result(project_folder)
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
# <END>
chatbot.append((None, "所有源文件均已处理完毕。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
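
上面"提交任务后轮询 futures 完成状态"的多线程模式,可用如下可独立运行的极简骨架理解(work 与各参数均为示意值,不涉及UI):

```python
from concurrent.futures import ThreadPoolExecutor
import time

def work(n):
    time.sleep(0.1)  # 模拟耗时任务
    return n * n

executor = ThreadPoolExecutor(max_workers=4)
futures = [executor.submit(work, i) for i in range(8)]
while True:
    done_flags = [f.done() for f in futures]
    print(f"剩余任务: {len(done_flags) - sum(done_flags)}")
    if all(done_flags):
        executor.shutdown()
        break
    time.sleep(0.2)  # 轮询间隔,期间可刷新界面
print([f.result() for f in futures])
```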
@CatchException
def 注释Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
history = [] # 清空历史,以免输入溢出
import glob, os
if os.path.exists(txt):
project_folder = txt
validate_path_safety(project_folder, chatbot.get_user())
else:
if txt == "": txt = '空空如也的输入栏'
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
if len(file_manifest) == 0:
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
yield from 注释源代码(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

查看文件

@@ -0,0 +1,391 @@
from toolbox import CatchException, update_ui
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
import datetime
import re
import os
from textwrap import dedent
# TODO: 解决缩进问题
find_function_end_prompt = '''
Below is a page of code that you need to read. This page may not yet be complete; your job is to split this page into separate functions, class methods, etc.
- Provide the line number where the first visible function ends.
- Provide the line number where the next visible function begins.
- If there are no other functions in this page, you should simply return the line number of the last line.
- Only focus on functions declared by the `def` keyword. Ignore inline functions. Ignore function calls.
------------------ Example ------------------
INPUT:
```
L0000 |import sys
L0001 |import re
L0002 |
L0003 |def trimmed_format_exc():
L0004 | import os
L0005 | import traceback
L0006 | str = traceback.format_exc()
L0007 | current_path = os.getcwd()
L0008 | replace_path = "."
L0009 | return str.replace(current_path, replace_path)
L0010 |
L0011 |
L0012 |def trimmed_format_exc_markdown():
L0013 | ...
L0014 | ...
```
OUTPUT:
```
<first_function_end_at>L0009</first_function_end_at>
<next_function_begin_from>L0012</next_function_begin_from>
```
------------------ End of Example ------------------
------------------ the real INPUT you need to process NOW ------------------
```
{THE_TAGGED_CODE}
```
'''
revise_function_prompt = '''
You need to read the following code, and revise the source code ({FILE_BASENAME}) according to the following instructions:
1. You should analyze the purpose of the functions (if there are any).
2. You need to add docstring for the provided functions (if there are any).
Be aware:
1. You must NOT modify the indent of code.
2. You are NOT authorized to change or translate non-comment code, to add empty lines, or to toggle quotation marks.
3. Use {LANG} to add comments and docstrings. Do NOT translate Chinese that is already in the code.
------------------ Example ------------------
INPUT:
```
L0000 |
L0001 |def zip_result(folder):
L0002 | t = gen_time_str()
L0003 | zip_folder(folder, get_log_folder(), f"result.zip")
L0004 | return os.path.join(get_log_folder(), f"result.zip")
L0005 |
L0006 |
```
OUTPUT:
<instruction_1_purpose>
This function compresses a given folder, and return the path of the resulting `zip` file.
</instruction_1_purpose>
<instruction_2_revised_code>
```
def zip_result(folder):
"""
Compresses the specified folder into a zip file and stores it in the log folder.
Args:
folder (str): The path to the folder that needs to be compressed.
Returns:
str: The path to the created zip file in the log folder.
"""
t = gen_time_str()
zip_folder(folder, get_log_folder(), f"result.zip") # ⭐ Execute the zipping of folder
return os.path.join(get_log_folder(), f"result.zip")
```
</instruction_2_revised_code>
------------------ End of Example ------------------
------------------ the real INPUT you need to process NOW ({FILE_BASENAME}) ------------------
```
{THE_CODE}
```
{INDENT_REMINDER}
{BRIEF_REMINDER}
{HINT_REMINDER}
'''
class PythonCodeComment():
def __init__(self, llm_kwargs, language) -> None:
self.original_content = ""
self.full_context = []
self.full_context_with_line_no = []
self.current_page_start = 0
self.page_limit = 100 # 100 lines of code each page
self.ignore_limit = 20
self.llm_kwargs = llm_kwargs
self.language = language
self.path = None
self.file_basename = None
self.file_brief = ""
def generate_tagged_code_from_full_context(self):
for i, code in enumerate(self.full_context):
number = i
padded_number = f"{number:04}"
result = f"L{padded_number}"
self.full_context_with_line_no.append(f"{result} | {code}")
return self.full_context_with_line_no
def read_file(self, path, brief):
with open(path, 'r', encoding='utf8') as f:
self.full_context = f.readlines()
self.original_content = ''.join(self.full_context)
self.file_basename = os.path.basename(path)
self.file_brief = brief
self.full_context_with_line_no = self.generate_tagged_code_from_full_context()
self.path = path
def find_next_function_begin(self, tagged_code:list, begin_and_end):
begin, end = begin_and_end
THE_TAGGED_CODE = ''.join(tagged_code)
self.llm_kwargs['temperature'] = 0
result = predict_no_ui_long_connection(
inputs=find_function_end_prompt.format(THE_TAGGED_CODE=THE_TAGGED_CODE),
llm_kwargs=self.llm_kwargs,
history=[],
sys_prompt="",
observe_window=[],
console_slience=True
)
def extract_number(text):
# 使用正则表达式匹配模式
match = re.search(r'<next_function_begin_from>L(\d+)</next_function_begin_from>', text)
if match:
# 提取匹配的数字部分并转换为整数
return int(match.group(1))
return None
line_no = extract_number(result)
if line_no is not None:
return line_no
else:
return end
def _get_next_window(self):
#
current_page_start = self.current_page_start
if self.current_page_start == len(self.full_context) + 1:
raise StopIteration
# 如果剩余的行数非常少,一鼓作气处理掉
if len(self.full_context) - self.current_page_start < self.ignore_limit:
future_page_start = len(self.full_context) + 1
self.current_page_start = future_page_start
return current_page_start, future_page_start
tagged_code = self.full_context_with_line_no[ self.current_page_start: self.current_page_start + self.page_limit]
line_no = self.find_next_function_begin(tagged_code, [self.current_page_start, self.current_page_start + self.page_limit])
if line_no > len(self.full_context) - 5:
line_no = len(self.full_context) + 1
future_page_start = line_no
self.current_page_start = future_page_start
# ! consider eof
return current_page_start, future_page_start
def dedent(self, text):
"""Remove any common leading whitespace from every line in `text`.
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Find the largest common whitespace between current line and previous
# winner.
else:
for i, (x, y) in enumerate(zip(margin, indent)):
if x != y:
margin = margin[:i]
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text, len(margin)
else:
return text, 0
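
该 dedent 方法在 textwrap.dedent 的基础上额外返回公共缩进宽度,可用如下示例验证(构造参数为占位值,仅为调用该方法):

```python
pcc = PythonCodeComment(llm_kwargs={}, language="English")  # 占位构造,不触发任何模型调用
text, n_indent = pcc.dedent("    a\n      b\n")
print(repr(text), n_indent)  # 输出: 'a\n  b\n' 4,即公共缩进为4个空格且已被移除
```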
def get_next_batch(self):
current_page_start, future_page_start = self._get_next_window()
return ''.join(self.full_context[current_page_start: future_page_start]), current_page_start, future_page_start
def tag_code(self, fn, hint):
code = fn
_, n_indent = self.dedent(code)
indent_reminder = "" if n_indent == 0 else "(Reminder: as you can see, this piece of code has indent made up with {n_indent} whitespace, please preseve them in the OUTPUT.)"
brief_reminder = "" if self.file_brief == "" else f"({self.file_basename} abstract: {self.file_brief})"
hint_reminder = "" if hint is None else f"(Reminder: do not ignore or modify code such as `{hint}`, provide complete code in the OUTPUT.)"
self.llm_kwargs['temperature'] = 0
result = predict_no_ui_long_connection(
            inputs=revise_function_prompt.format(
LANG=self.language,
FILE_BASENAME=self.file_basename,
THE_CODE=code,
INDENT_REMINDER=indent_reminder,
BRIEF_REMINDER=brief_reminder,
HINT_REMINDER=hint_reminder
),
llm_kwargs=self.llm_kwargs,
history=[],
sys_prompt="",
observe_window=[],
console_slience=True
)
def get_code_block(reply):
import re
pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
matches = re.findall(pattern, reply) # find all code blocks in text
            if len(matches) == 1:
                code = matches[0]
                if code.startswith('python'): code = code[len('python'):]  # 仅移除语言标记;不能用strip('python'),它会误删代码首尾的p/y/t/h/o/n字符
                return code # code block
return None
code_block = get_code_block(result)
if code_block is not None:
code_block = self.sync_and_patch(original=code, revised=code_block)
return code_block
else:
return code
def get_markdown_block_in_html(self, html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
found_list = soup.find_all("div", class_="markdown-body")
if found_list:
res = found_list[0]
return res.prettify()
else:
return None
def sync_and_patch(self, original, revised):
"""Ensure the number of pre-string empty lines in revised matches those in original."""
def count_leading_empty_lines(s, reverse=False):
"""Count the number of leading empty lines in a string."""
lines = s.split('\n')
if reverse: lines = list(reversed(lines))
count = 0
for line in lines:
if line.strip() == '':
count += 1
else:
break
return count
original_empty_lines = count_leading_empty_lines(original)
revised_empty_lines = count_leading_empty_lines(revised)
if original_empty_lines > revised_empty_lines:
additional_lines = '\n' * (original_empty_lines - revised_empty_lines)
revised = additional_lines + revised
elif original_empty_lines < revised_empty_lines:
lines = revised.split('\n')
revised = '\n'.join(lines[revised_empty_lines - original_empty_lines:])
original_empty_lines = count_leading_empty_lines(original, reverse=True)
revised_empty_lines = count_leading_empty_lines(revised, reverse=True)
if original_empty_lines > revised_empty_lines:
additional_lines = '\n' * (original_empty_lines - revised_empty_lines)
revised = revised + additional_lines
elif original_empty_lines < revised_empty_lines:
lines = revised.split('\n')
revised = '\n'.join(lines[:-(revised_empty_lines - original_empty_lines)])
return revised
def begin_comment_source_code(self, chatbot=None, history=None):
# from toolbox import update_ui_lastest_msg
assert self.path is not None
assert '.py' in self.path # must be python source code
# write_target = self.path + '.revised.py'
write_content = ""
# with open(self.path + '.revised.py', 'w+', encoding='utf8') as f:
while True:
try:
# yield from update_ui_lastest_msg(f"({self.file_basename}) 正在读取下一段代码片段:\n", chatbot=chatbot, history=history, delay=0)
next_batch, line_no_start, line_no_end = self.get_next_batch()
# yield from update_ui_lastest_msg(f"({self.file_basename}) 处理代码片段:\n\n{next_batch}", chatbot=chatbot, history=history, delay=0)
hint = None
MAX_ATTEMPT = 2
for attempt in range(MAX_ATTEMPT):
result = self.tag_code(next_batch, hint)
try:
successful, hint = self.verify_successful(next_batch, result)
except Exception as e:
print('ignored exception:\n' + str(e))
break
if successful:
break
if attempt == MAX_ATTEMPT - 1:
# cannot deal with this, give up
result = next_batch
break
# f.write(result)
write_content += result
except StopIteration:
# end of file reached, stop and return the accumulated content
return None, write_content
def verify_successful(self, original, revised):
""" Determine whether the revised code contains every line that already exists
"""
from crazy_functions.ast_fns.comment_remove import remove_python_comments
original = remove_python_comments(original)
original_lines = original.split('\n')
revised_lines = revised.split('\n')
for l in original_lines:
l = l.strip()
if '\'' in l or '\"' in l: continue # skip lines containing quotes: ast may normalize " to '
found = False
for lt in revised_lines:
if l in lt:
found = True
break
if not found:
return False, l
return True, None
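
As a standalone illustration of the containment check in `verify_successful` (a minimal sketch, not the plugin's own test code): every non-empty original line must appear somewhere in the revised text, otherwise the revision is rejected and the missing line doubles as a retry hint.

```python
def contains_all(original_lines, revised_lines):
    # Mirror of the check above: each stripped original line must be a
    # substring of at least one revised line.
    for l in original_lines:
        l = l.strip()
        if not any(l in lt for lt in revised_lines):
            return False, l  # the missing line is returned as a hint
    return True, None

print(contains_all(["x = 1", "y = x + 2"],
                   ["x = 1  # assign", "y = x + 2  # add"]))  # (True, None)
```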


@@ -0,0 +1,45 @@
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<style>ADVANCED_CSS</style>
<meta charset="UTF-8">
<title>源文件对比</title>
<style>
body {
font-family: Arial, sans-serif;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
margin: 0;
}
.container {
display: flex;
width: 95%;
height: -webkit-fill-available;
}
.code-container {
flex: 1;
margin: 0px;
padding: 0px;
border: 1px solid #ccc;
background-color: #f9f9f9;
overflow: auto;
}
pre {
white-space: pre-wrap;
word-wrap: break-word;
}
</style>
</head>
<body>
<div class="container">
<div class="code-container">
REPLACE_CODE_FILE_LEFT
</div>
<div class="code-container">
REPLACE_CODE_FILE_RIGHT
</div>
</div>
</body>
</html>
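
The ALL-CAPS tokens above (`ADVANCED_CSS`, `REPLACE_CODE_FILE_LEFT`, `REPLACE_CODE_FILE_RIGHT`) are plain string placeholders; a minimal sketch of how a caller presumably fills them (template abridged, contents hypothetical):

```python
template = "<div>REPLACE_CODE_FILE_LEFT</div><div>REPLACE_CODE_FILE_RIGHT</div>"  # abridged
html = (template
        .replace("REPLACE_CODE_FILE_LEFT", "<pre>original source</pre>")
        .replace("REPLACE_CODE_FILE_RIGHT", "<pre>commented source</pre>"))
print(html)
```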


@@ -0,0 +1,46 @@
import ast
class CommentRemover(ast.NodeTransformer):
def visit_FunctionDef(self, node):
# 移除函数的文档字符串
if (node.body and isinstance(node.body[0], ast.Expr) and
isinstance(node.body[0].value, ast.Str)):
node.body = node.body[1:]
self.generic_visit(node)
return node
def visit_ClassDef(self, node):
# 移除类的文档字符串
if (node.body and isinstance(node.body[0], ast.Expr) and
isinstance(node.body[0].value, ast.Str)):
node.body = node.body[1:]
self.generic_visit(node)
return node
def visit_Module(self, node):
# 移除模块的文档字符串
if (node.body and isinstance(node.body[0], ast.Expr) and
isinstance(node.body[0].value, ast.Str)):
node.body = node.body[1:]
self.generic_visit(node)
return node
def remove_python_comments(source_code):
# 解析源代码为 AST
tree = ast.parse(source_code)
# 移除注释
transformer = CommentRemover()
tree = transformer.visit(tree)
# 将处理后的 AST 转换回源代码
return ast.unparse(tree)
# 示例使用
if __name__ == "__main__":
with open("source.py", "r", encoding="utf-8") as f:
source_code = f.read()
cleaned_code = remove_python_comments(source_code)
with open("cleaned_source.py", "w", encoding="utf-8") as f:
f.write(cleaned_code)
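
One caveat: `ast.Str` is deprecated since Python 3.8 and removed in 3.12, while `ast.unparse` requires Python ≥ 3.9. On newer interpreters the docstring test above can be written against `ast.Constant` instead (a minimal sketch):

```python
import ast

def is_docstring_stmt(stmt):
    # A docstring is an expression statement whose value is a string constant.
    return (isinstance(stmt, ast.Expr)
            and isinstance(stmt.value, ast.Constant)
            and isinstance(stmt.value.value, str))
```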


@@ -1,9 +1,20 @@
from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton
from shared_utils.char_visual_effect import scolling_visual_effect
import threading
import os
import logging
def input_clipping(inputs, history, max_token_limit):
"""
当输入文本 + 历史文本超出最大限制时,采取措施丢弃一部分文本。
输入:
- inputs 本次请求
- history 历史上下文
- max_token_limit 最大token限制
输出:
- inputs 本次请求经过clip
- history 历史上下文经过clip
"""
import numpy as np
from request_llms.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
@@ -158,7 +169,7 @@ def can_multi_process(llm) -> bool:
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array, inputs_show_user_array, llm_kwargs,
chatbot, history_array, sys_prompt_array,
refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
refresh_interval=0.2, max_workers=-1, scroller_max_len=75,
handle_token_exceed=True, show_user_at_complete=False,
retry_times_at_unknown_error=2,
):
@@ -283,6 +294,8 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
cnt = 0
while True:
# yield一次以刷新前端页面
time.sleep(refresh_interval)
@@ -295,8 +308,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
mutable[thread_index][1] = time.time()
# 在前端打印些好玩的东西
for thread_index, _ in enumerate(worker_done):
print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
print_something_really_funny = f"[ ...`{scolling_visual_effect(mutable[thread_index][0], scroller_max_len)}`... ]"
observe_win.append(print_something_really_funny)
# 在前端打印些好玩的东西
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
@@ -349,7 +361,7 @@ def read_and_clean_pdf_text(fp):
import fitz, copy
import re
import numpy as np
from colorful import print亮黄, print亮绿
from shared_utils.colorful import print亮黄, print亮绿
fc = 0 # Index 0 文本
fs = 1 # Index 1 字体
fb = 2 # Index 2 框框
@@ -568,7 +580,7 @@ class nougat_interface():
from toolbox import ProxyNetworkActivate
logging.info(f'正在执行命令 {command}')
with ProxyNetworkActivate("Nougat_Download"):
process = subprocess.Popen(command, shell=True, cwd=cwd, env=os.environ)
process = subprocess.Popen(command, shell=False, cwd=cwd, env=os.environ)
try:
stdout, stderr = process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
@@ -592,7 +604,8 @@ class nougat_interface():
yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度正在加载NOUGAT... 提示首次运行需要花费较长时间下载NOUGAT参数",
chatbot=chatbot, history=history, delay=0)
self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600)
command = ['nougat', '--out', os.path.abspath(dst), os.path.abspath(fp)]
self.nougat_with_timeout(command, cwd=os.getcwd(), timeout=3600)
res = glob.glob(os.path.join(dst,'*.mmd'))
if len(res) == 0:
self.threadLock.release()
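
The `shell=True` to `shell=False` change above also changes the expected argument type: with `shell=False`, the command should be a list of arguments rather than one shell string, which avoids quoting pitfalls and shell injection around user-controlled paths. A minimal illustration (assumes a POSIX system with `echo` on PATH):

```python
import subprocess

# With shell=False, arguments go directly to the program; a filename full of
# shell metacharacters is passed verbatim and never interpreted by a shell.
process = subprocess.Popen(["echo", "my file; rm -rf ~"],
                           stdout=subprocess.PIPE, shell=False)
stdout, _ = process.communicate(timeout=10)
print(stdout.decode())
```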


@@ -2,7 +2,7 @@ import os
from textwrap import indent
class FileNode:
def __init__(self, name):
def __init__(self, name, build_manifest=False):
self.name = name
self.children = []
self.is_leaf = False
@@ -10,6 +10,8 @@ class FileNode:
self.parenting_ship = []
self.comment = ""
self.comment_maxlen_show = 50
self.build_manifest = build_manifest
self.manifest = {}
@staticmethod
def add_linebreaks_at_spaces(string, interval=10):
@@ -29,6 +31,7 @@ class FileNode:
level = 1
if directory_names == "":
new_node = FileNode(file_name)
self.manifest[file_path] = new_node
current_node.children.append(new_node)
new_node.is_leaf = True
new_node.comment = self.sanitize_comment(file_comment)
@@ -50,6 +53,7 @@ class FileNode:
new_node.level = level - 1
current_node = new_node
term = FileNode(file_name)
self.manifest[file_path] = term
term.level = level
term.comment = self.sanitize_comment(file_comment)
term.is_leaf = True


@@ -92,7 +92,7 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
def generate_story_image(self, story_paragraph):
try:
from crazy_functions.图片生成 import gen_image
from crazy_functions.Image_Generate import gen_image
prompt_ = predict_no_ui_long_connection(inputs=story_paragraph, llm_kwargs=self.llm_kwargs, history=[], sys_prompt='你需要根据用户给出的小说段落,进行简短的环境描写。要求80字以内。')
image_url, image_path = gen_image(self.llm_kwargs, prompt_, '512x512', model="dall-e-2", quality='standard', style='natural')
return f'<br/><div align="center"><img src="file={image_path}"></div>'


@@ -62,8 +62,8 @@ class GptJsonIO():
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
if self.example_instruction:
schema_str = json.dumps(reduced_schema)
return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
else:
return PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE.format(schema=schema_str)


@@ -1,10 +1,11 @@
from toolbox import update_ui, update_ui_lastest_msg, get_log_folder
from toolbox import get_conf, objdump, objload, promote_file_to_downloadzone
from toolbox import get_conf, promote_file_to_downloadzone
from .latex_toolbox import PRESERVE, TRANSFORM
from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
from .latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process
from .latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout
from .latex_toolbox import find_title_and_abs
from .latex_pickle_io import objdump, objload
import os, shutil
import re


@@ -0,0 +1,46 @@
import pickle
class SafeUnpickler(pickle.Unpickler):
def get_safe_classes(self):
from crazy_functions.latex_fns.latex_actions import LatexPaperFileGroup, LatexPaperSplit
from crazy_functions.latex_fns.latex_toolbox import LinkedListNode
# 定义允许的安全类
safe_classes = {
# 在这里添加其他安全的类
'LatexPaperFileGroup': LatexPaperFileGroup,
'LatexPaperSplit': LatexPaperSplit,
'LinkedListNode': LinkedListNode,
}
return safe_classes
def find_class(self, module, name):
# 只允许特定的类进行反序列化
self.safe_classes = self.get_safe_classes()
match_class_name = None
for class_name in self.safe_classes.keys():
if (class_name in f'{module}.{name}'):
match_class_name = class_name
if module == 'numpy' or module.startswith('numpy.'):
return super().find_class(module, name)
if match_class_name is not None:
return self.safe_classes[match_class_name]
# 如果尝试加载未授权的类,则抛出异常
raise pickle.UnpicklingError(f"Attempted to deserialize unauthorized class '{name}' from module '{module}'")
def objdump(obj, file="objdump.tmp"):
with open(file, "wb+") as f:
pickle.dump(obj, f)
return
def objload(file="objdump.tmp"):
import os
if not os.path.exists(file):
return
with open(file, "rb") as f:
unpickler = SafeUnpickler(f)
return unpickler.load()
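
A quick sanity check of the allow-list behaviour (temp-file name hypothetical): built-in containers round-trip fine because they never reach `find_class`, while a pickle referencing an unauthorized class raises `pickle.UnpicklingError`:

```python
if __name__ == "__main__":
    objdump({"answer": 42}, file="safe_demo.tmp")
    print(objload(file="safe_demo.tmp"))  # {'answer': 42}
```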


@@ -4,7 +4,7 @@ from toolbox import promote_file_to_downloadzone
from toolbox import write_history_to_file, promote_file_to_downloadzone
from toolbox import get_conf
from toolbox import ProxyNetworkActivate
from colorful import *
from shared_utils.colorful import *
import requests
import random
import copy
@@ -72,7 +72,7 @@ def produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chat
generated_conclusion_files.append(res_path)
return res_path
def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG):
def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG, plugin_kwargs={}):
from crazy_functions.pdf_fns.report_gen_html import construct_html
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@@ -138,7 +138,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi
chatbot=chatbot,
history_array=[meta for _ in inputs_array],
sys_prompt_array=[
"请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in inputs_array],
"请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" + plugin_kwargs.get("additional_prompt", "") for _ in inputs_array],
)
# -=-=-=-=-=-=-=-= 写出Markdown文件 -=-=-=-=-=-=-=-=
produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chatbot, fp, generated_conclusion_files)


@@ -0,0 +1,26 @@
import os
from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages
from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
from toolbox import write_history_to_file, promote_file_to_downloadzone, get_conf, extract_archive
from crazy_functions.pdf_fns.parse_pdf import parse_pdf, translate_pdf
def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url):
import copy, json
TOKEN_LIMIT_PER_FRAGMENT = 1024
generated_conclusion_files = []
generated_html_files = []
DST_LANG = "中文"
from crazy_functions.pdf_fns.report_gen_html import construct_html
for index, fp in enumerate(file_manifest):
chatbot.append(["当前进度:", f"正在连接GROBID服务,请稍候: {grobid_url}\n如果等待时间过长,请修改config中的GROBID_URL,可修改成本地GROBID服务。"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
article_dict = parse_pdf(fp, grobid_url)
grobid_json_res = os.path.join(get_log_folder(), gen_time_str() + "grobid.json")
with open(grobid_json_res, 'w+', encoding='utf8') as f:
f.write(json.dumps(article_dict, indent=4, ensure_ascii=False))
promote_file_to_downloadzone(grobid_json_res, chatbot=chatbot)
if article_dict is None: raise RuntimeError("解析PDF失败,请检查PDF是否损坏。")
yield from translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG, plugin_kwargs=plugin_kwargs)
chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files)))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面


@@ -1,83 +1,15 @@
from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages
from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
from toolbox import get_log_folder
from toolbox import update_ui, promote_file_to_downloadzone
from toolbox import write_history_to_file, promote_file_to_downloadzone
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from .crazy_utils import read_and_clean_pdf_text
from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf
from colorful import *
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from crazy_functions.crazy_utils import read_and_clean_pdf_text
from shared_utils.colorful import *
import os
@CatchException
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
disable_auto_promotion(chatbot)
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",
"批量翻译PDF文档。函数插件贡献者: Binary-Husky"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:
check_packages(["fitz", "tiktoken", "scipdf"])
except:
report_exception(chatbot, history,
a=f"解析项目: {txt}",
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# 清空历史,以免输入溢出
history = []
from .crazy_utils import get_files_from_everything
success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf')
# 检测输入参数,如没有给定输入参数,直接退出
if not success:
if txt == "": txt = '空空如也的输入栏'
# 如果没找到任何文件
if len(file_manifest) == 0:
report_exception(chatbot, history,
a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}")
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
# 开始正式执行任务
grobid_url = get_avail_grobid_url()
if grobid_url is not None:
yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
else:
yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url):
import copy, json
TOKEN_LIMIT_PER_FRAGMENT = 1024
generated_conclusion_files = []
generated_html_files = []
DST_LANG = "中文"
from crazy_functions.pdf_fns.report_gen_html import construct_html
for index, fp in enumerate(file_manifest):
chatbot.append(["当前进度:", f"正在连接GROBID服务,请稍候: {grobid_url}\n如果等待时间过长,请修改config中的GROBID_URL,可修改成本地GROBID服务。"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
article_dict = parse_pdf(fp, grobid_url)
grobid_json_res = os.path.join(get_log_folder(), gen_time_str() + "grobid.json")
with open(grobid_json_res, 'w+', encoding='utf8') as f:
f.write(json.dumps(article_dict, indent=4, ensure_ascii=False))
promote_file_to_downloadzone(grobid_json_res, chatbot=chatbot)
if article_dict is None: raise RuntimeError("解析PDF失败,请检查PDF是否损坏。")
yield from translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG)
chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files)))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
def 解析PDF_简单拆解(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
"""
此函数已经弃用
注意此函数已经弃用新函数位于crazy_functions/pdf_fns/parse_pdf.py
"""
import copy
TOKEN_LIMIT_PER_FRAGMENT = 1024
@@ -116,7 +48,8 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
chatbot=chatbot,
history_array=[[paper_meta] for _ in paper_fragments],
sys_prompt_array=[
"请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in paper_fragments],
"请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" + plugin_kwargs.get("additional_prompt", "")
for _ in paper_fragments],
# max_workers=5 # OpenAI所允许的最大并行过载
)
gpt_response_collection_md = copy.deepcopy(gpt_response_collection)


@@ -0,0 +1,213 @@
from toolbox import get_log_folder, gen_time_str, get_conf
from toolbox import update_ui, promote_file_to_downloadzone
from toolbox import promote_file_to_downloadzone, extract_archive
from toolbox import generate_file_link, zip_folder
from crazy_functions.crazy_utils import get_files_from_everything
from shared_utils.colorful import *
import os
def refresh_key(doc2x_api_key):
import requests, json
url = "https://api.doc2x.noedgeai.com/api/token/refresh"
res = requests.post(
url,
headers={"Authorization": "Bearer " + doc2x_api_key}
)
res_json = []
if res.status_code == 200:
decoded = res.content.decode("utf-8")
res_json = json.loads(decoded)
doc2x_api_key = res_json['data']['token']
else:
raise RuntimeError("[ERROR] status code: %d, body: %s" % (res.status_code, res.text))
return doc2x_api_key
def 解析PDF_DOC2X_转Latex(pdf_file_path):
import requests, json, os
DOC2X_API_KEY = get_conf('DOC2X_API_KEY')
latex_dir = get_log_folder(plugin_name="pdf_ocr_latex")
doc2x_api_key = DOC2X_API_KEY
if doc2x_api_key.startswith('sk-'):
url = "https://api.doc2x.noedgeai.com/api/v1/pdf"
else:
doc2x_api_key = refresh_key(doc2x_api_key)
url = "https://api.doc2x.noedgeai.com/api/platform/pdf"
res = requests.post(
url,
files={"file": open(pdf_file_path, "rb")},
data={"ocr": "1"},
headers={"Authorization": "Bearer " + doc2x_api_key}
)
res_json = []
if res.status_code == 200:
decoded = res.content.decode("utf-8")
for z_decoded in decoded.split('\n'):
if len(z_decoded) == 0: continue
assert z_decoded.startswith("data: ")
z_decoded = z_decoded[len("data: "):]
decoded_json = json.loads(z_decoded)
res_json.append(decoded_json)
else:
raise RuntimeError("[ERROR] status code: %d, body: %s" % (res.status_code, res.text))
uuid = res_json[0]['uuid']
to = "latex" # latex, md, docx
url = "https://api.doc2x.noedgeai.com/api/export"+"?request_id="+uuid+"&to="+to
res = requests.get(url, headers={"Authorization": "Bearer " + doc2x_api_key})
latex_zip_path = os.path.join(latex_dir, gen_time_str() + '.zip')
latex_unzip_path = os.path.join(latex_dir, gen_time_str())
if res.status_code == 200:
with open(latex_zip_path, "wb") as f: f.write(res.content)
else:
raise RuntimeError("[ERROR] status code: %d, body: %s" % (res.status_code, res.text))
import zipfile
with zipfile.ZipFile(latex_zip_path, 'r') as zip_ref:
zip_ref.extractall(latex_unzip_path)
return latex_unzip_path
def 解析PDF_DOC2X_单文件(fp, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, DOC2X_API_KEY, user_request):
def pdf2markdown(filepath):
import requests, json, os
markdown_dir = get_log_folder(plugin_name="pdf_ocr")
doc2x_api_key = DOC2X_API_KEY
if doc2x_api_key.startswith('sk-'):
url = "https://api.doc2x.noedgeai.com/api/v1/pdf"
else:
doc2x_api_key = refresh_key(doc2x_api_key)
url = "https://api.doc2x.noedgeai.com/api/platform/pdf"
chatbot.append((None, "加载PDF文件,发送至DOC2X解析..."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
res = requests.post(
url,
files={"file": open(filepath, "rb")},
data={"ocr": "1"},
headers={"Authorization": "Bearer " + doc2x_api_key}
)
res_json = []
if res.status_code == 200:
decoded = res.content.decode("utf-8")
for z_decoded in decoded.split('\n'):
if len(z_decoded) == 0: continue
assert z_decoded.startswith("data: ")
z_decoded = z_decoded[len("data: "):]
decoded_json = json.loads(z_decoded)
res_json.append(decoded_json)
if 'limit exceeded' in decoded_json.get('status', ''):
raise RuntimeError("Doc2x API 页数受限,请联系 Doc2x 方面,并更换新的 API 秘钥。")
else:
raise RuntimeError("[ERROR] status code: %d, body: %s" % (res.status_code, res.text))
uuid = res_json[0]['uuid']
to = "md" # latex, md, docx
url = "https://api.doc2x.noedgeai.com/api/export"+"?request_id="+uuid+"&to="+to
chatbot.append((None, f"读取解析: {url} ..."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
res = requests.get(url, headers={"Authorization": "Bearer " + doc2x_api_key})
md_zip_path = os.path.join(markdown_dir, gen_time_str() + '.zip')
if res.status_code == 200:
with open(md_zip_path, "wb") as f: f.write(res.content)
else:
raise RuntimeError("[ERROR] status code: %d, body: %s" % (res.status_code, res.text))
promote_file_to_downloadzone(md_zip_path, chatbot=chatbot)
chatbot.append((None, f"完成解析 {md_zip_path} ..."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return md_zip_path
def deliver_to_markdown_plugin(md_zip_path, user_request):
from crazy_functions.Markdown_Translate import Markdown英译中
import shutil, re
time_tag = gen_time_str()
target_path_base = get_log_folder(chatbot.get_user())
file_origin_name = os.path.basename(md_zip_path)
this_file_path = os.path.join(target_path_base, file_origin_name)
os.makedirs(target_path_base, exist_ok=True)
shutil.copyfile(md_zip_path, this_file_path)
ex_folder = this_file_path + ".extract"
extract_archive(
file_path=this_file_path, dest_dir=ex_folder
)
# edit markdown files
success, file_manifest, project_folder = get_files_from_everything(ex_folder, type='.md')
for generated_fp in file_manifest:
# 修正一些公式问题
with open(generated_fp, 'r', encoding='utf8') as f:
content = f.read()
# 将公式中的\[ \]替换成$$
content = content.replace(r'\[', r'$$').replace(r'\]', r'$$')
# 将公式中的\( \)替换成$
content = content.replace(r'\(', r'$').replace(r'\)', r'$')
content = content.replace('```markdown', '\n').replace('```', '\n')
with open(generated_fp, 'w', encoding='utf8') as f:
f.write(content)
promote_file_to_downloadzone(generated_fp, chatbot=chatbot)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 生成在线预览html
file_name = '在线预览翻译(原文)' + gen_time_str() + '.html'
preview_fp = os.path.join(ex_folder, file_name)
from shared_utils.advanced_markdown_format import markdown_convertion_for_file
with open(generated_fp, "r", encoding="utf-8") as f:
md = f.read()
# # Markdown中使用不标准的表格,需要在表格前加上一个emoji,以便公式渲染
# md = re.sub(r'^<table>', r'.<table>', md, flags=re.MULTILINE)
html = markdown_convertion_for_file(md)
with open(preview_fp, "w", encoding="utf-8") as f: f.write(html)
chatbot.append([None, f"生成在线预览:{generate_file_link([preview_fp])}"])
promote_file_to_downloadzone(preview_fp, chatbot=chatbot)
chatbot.append((None, f"调用Markdown插件 {ex_folder} ..."))
plugin_kwargs['markdown_expected_output_dir'] = ex_folder
translated_f_name = 'translated_markdown.md'
generated_fp = plugin_kwargs['markdown_expected_output_path'] = os.path.join(ex_folder, translated_f_name)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
yield from Markdown英译中(ex_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
if os.path.exists(generated_fp):
# 修正一些公式问题
with open(generated_fp, 'r', encoding='utf8') as f: content = f.read()
content = content.replace('```markdown', '\n').replace('```', '\n')
# Markdown中使用不标准的表格,需要在表格前加上一个emoji,以便公式渲染
# content = re.sub(r'^<table>', r'.<table>', content, flags=re.MULTILINE)
with open(generated_fp, 'w', encoding='utf8') as f: f.write(content)
# 生成在线预览html
file_name = '在线预览翻译' + gen_time_str() + '.html'
preview_fp = os.path.join(ex_folder, file_name)
from shared_utils.advanced_markdown_format import markdown_convertion_for_file
with open(generated_fp, "r", encoding="utf-8") as f:
md = f.read()
html = markdown_convertion_for_file(md)
with open(preview_fp, "w", encoding="utf-8") as f: f.write(html)
promote_file_to_downloadzone(preview_fp, chatbot=chatbot)
# 生成包含图片的压缩包
dest_folder = get_log_folder(chatbot.get_user())
zip_name = '翻译后的带图文档.zip'
zip_folder(source_folder=ex_folder, dest_folder=dest_folder, zip_name=zip_name)
zip_fp = os.path.join(dest_folder, zip_name)
promote_file_to_downloadzone(zip_fp, chatbot=chatbot)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
md_zip_path = yield from pdf2markdown(fp)
yield from deliver_to_markdown_plugin(md_zip_path, user_request)
def 解析PDF_基于DOC2X(file_manifest, *args):
for index, fp in enumerate(file_manifest):
yield from 解析PDF_DOC2X_单文件(fp, *args)
return
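
The Doc2X endpoints reply with a server-sent-event style body made of `data: {...}` lines; a self-contained sketch of the parsing loop used in both functions above (payload hypothetical):

```python
import json

decoded = 'data: {"uuid": "abc123"}\ndata: {"status": "done"}'  # hypothetical stream body
res_json = []
for line in decoded.split('\n'):
    if len(line) == 0:
        continue
    assert line.startswith("data: ")
    res_json.append(json.loads(line[len("data: "):]))
print(res_json[0]['uuid'])  # abc123
```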


@@ -0,0 +1,73 @@
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>GPT-Academic 翻译报告书</title>
<style>
.centered-a {
color: red;
text-align: center;
margin-bottom: 2%;
font-size: 1.5em;
}
.centered-b {
color: red;
text-align: center;
margin-top: 10%;
margin-bottom: 20%;
font-size: 1.5em;
}
.centered-c {
color: rgba(255, 0, 0, 0);
text-align: center;
margin-top: 2%;
margin-bottom: 20%;
font-size: 7em;
}
</style>
<script>
// Configure MathJax settings
MathJax = {
tex: {
inlineMath: [
['$', '$'],
['\\(', '\\)']
]
}
}
addEventListener('zero-md-rendered', () => {MathJax.typeset(); console.log('MathJax typeset!');})
</script>
<!-- Load MathJax library -->
<script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"></script>
<script
type="module"
src="https://cdn.jsdelivr.net/gh/zerodevx/zero-md@2/dist/zero-md.min.js"
></script>
</head>
<body>
<div class="test_temp1" style="width:10%; height: 500px; float:left;">
</div>
<div class="test_temp2" style="width:80%; height: 500px; float:left;">
<!-- Simply set the `src` attribute to your MD file and win -->
<div class="centered-a">
请按Ctrl+S保存此页面,否则该页面可能在几分钟后失效。
</div>
<zero-md src="translated_markdown.md" no-shadow>
</zero-md>
<div class="centered-b">
本报告由GPT-Academic开源项目生成,地址https://github.com/binary-husky/gpt_academic。
</div>
<div class="centered-c">
本报告由GPT-Academic开源项目生成,地址https://github.com/binary-husky/gpt_academic。
</div>
</div>
<div class="test_temp3" style="width:10%; height: 500px; float:left;">
</div>
</body>
</html>


@@ -0,0 +1,52 @@
import os, json, base64
from pydantic import BaseModel, Field
from textwrap import dedent
from typing import List
class ArgProperty(BaseModel): # PLUGIN_ARG_MENU
title: str = Field(description="The title", default="")
description: str = Field(description="The description", default="")
default_value: str = Field(description="The default value", default="")
type: str = Field(description="The type", default="") # currently we support ['string', 'dropdown']
options: List[str] = Field(default=[], description="List of options available for the argument") # only used when type is 'dropdown'
class GptAcademicPluginTemplate():
def __init__(self):
# please note that `execute` method may run in different threads,
# thus you should not store any state in the plugin instance,
# which may be accessed by multiple threads
pass
def define_arg_selection_menu(self):
"""
An example as below:
```
def define_arg_selection_menu(self):
gui_definition = {
"main_input":
ArgProperty(title="main input", description="description", default_value="default_value", type="string").model_dump_json(),
"advanced_arg":
ArgProperty(title="advanced arguments", description="description", default_value="default_value", type="string").model_dump_json(),
"additional_arg_01":
ArgProperty(title="additional", description="description", default_value="default_value", type="string").model_dump_json(),
}
return gui_definition
```
"""
raise NotImplementedError("You need to implement this method in your plugin class")
def get_js_code_for_generating_menu(self, btnName):
define_arg_selection = self.define_arg_selection_menu()
if len(define_arg_selection.keys()) > 8:
raise ValueError("You can only have up to 8 arguments in the define_arg_selection")
# if "main_input" not in define_arg_selection:
# raise ValueError("You must have a 'main_input' in the define_arg_selection")
DEFINE_ARG_INPUT_INTERFACE = json.dumps(define_arg_selection)
return base64.b64encode(DEFINE_ARG_INPUT_INTERFACE.encode('utf-8')).decode('utf-8')
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
raise NotImplementedError("You need to implement this method in your plugin class")
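
On the browser side, the Base64 payload produced by `get_js_code_for_generating_menu` decodes straight back to the menu's JSON definition; a self-contained round-trip sketch (menu content hypothetical):

```python
import base64, json

menu = {"num_day": '{"title": "日期选择", "type": "dropdown"}'}  # hypothetical menu
encoded = base64.b64encode(json.dumps(menu).encode('utf-8')).decode('utf-8')
decoded = json.loads(base64.b64decode(encoded).decode('utf-8'))
assert decoded == menu
```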


@@ -0,0 +1,87 @@
SearchOptimizerPrompt="""作为一个网页搜索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成个不同版本的“检索词”,从而提高网页检索的精度。生成的问题要求指向对象清晰明确,并与“原问题语言相同”。例如:
历史记录:
"
Q: 对话背景。
A: 当前对话是关于 Nginx 的介绍和在Ubuntu上的使用等。
"
原问题: 怎么下载
检索词: ["Nginx 下载","Ubuntu Nginx","Ubuntu安装Nginx"]
----------------
历史记录:
"
Q: 对话背景。
A: 当前对话是关于 Nginx 的介绍和使用等。
Q: 报错 "no connection"
A: 报错"no connection"可能是因为……
"
原问题: 怎么解决
检索词: ["Nginx报错"no connection" 解决","Nginx'no connection'报错 原因","Nginx提示'no connection'"]
----------------
历史记录:
"
"
原问题: 你知道 Python 么?
检索词: ["Python","Python 使用教程。","Python 特点和优势"]
----------------
历史记录:
"
Q: 列出Java的三种特点?
A: 1. Java 是一种编译型语言。
2. Java 是一种面向对象的编程语言。
3. Java 是一种跨平台的编程语言。
"
原问题: 介绍下第2点。
检索词: ["Java 面向对象特点","Java 面向对象编程优势。","Java 面向对象编程"]
----------------
现在有历史记录:
"
{history}
"
有其原问题: {query}
直接给出最多{num}个检索词,必须以json形式给出,不得有多余字符:
"""
SearchAcademicOptimizerPrompt="""作为一个学术论文搜索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成个不同版本的“检索词”,从而提高学术论文检索的精度。生成的问题要求指向对象清晰明确,并与“原问题语言相同”。例如:
历史记录:
"
Q: 对话背景。
A: 当前对话是关于深度学习的介绍和在图像识别中的应用等。
"
原问题: 怎么下载相关论文
检索词: ["深度学习 图像识别 论文下载","图像识别 深度学习 研究论文","深度学习 图像识别 论文资源","Deep Learning Image Recognition Paper Download","Image Recognition Deep Learning Research Paper"]
----------------
历史记录:
"
Q: 对话背景。
A: 当前对话是关于深度学习的介绍和应用等。
Q: 报错 "模型不收敛"
A: 报错"模型不收敛"可能是因为……
"
原问题: 怎么解决
检索词: ["深度学习 模型不收敛 解决方案 论文","深度学习 模型不收敛 原因 研究","深度学习 模型不收敛 论文","Deep Learning Model Convergence Issue Solution Paper","Deep Learning Model Convergence Problem Research"]
----------------
历史记录:
"
"
原问题: 你知道 GAN 么?
检索词: ["生成对抗网络 论文","GAN 使用教程 论文","GAN 特点和优势 研究","Generative Adversarial Network Paper","GAN Usage Tutorial Paper"]
----------------
历史记录:
"
Q: 列出机器学习的三种应用?
A: 1. 机器学习在图像识别中的应用。
2. 机器学习在自然语言处理中的应用。
3. 机器学习在推荐系统中的应用。
"
原问题: 介绍下第2点。
检索词: ["机器学习 自然语言处理 应用 论文","机器学习 自然语言处理 研究","机器学习 NLP 应用 论文","Machine Learning Natural Language Processing Application Paper","Machine Learning NLP Research"]
----------------
现在有历史记录:
"
{history}
"
有其原问题: {query}
直接给出最多{num}个检索词,必须以json形式给出,不得有多余字符:
"""


@@ -10,7 +10,7 @@ def read_avail_plugin_enum():
from crazy_functional import get_crazy_functions
plugin_arr = get_crazy_functions()
# remove plugins without explanation
plugin_arr = {k:v for k, v in plugin_arr.items() if 'Info' in v}
plugin_arr = {k:v for k, v in plugin_arr.items() if ('Info' in v) and ('Function' in v)}
plugin_arr_info = {"F_{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict_parse = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}


@@ -77,7 +77,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面


@@ -5,7 +5,7 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from .crazy_utils import read_and_clean_pdf_text
from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf
from colorful import *
from shared_utils.colorful import *
import copy
import os
import math


@@ -12,7 +12,7 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
file_content = f.read()
i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
i_say_show_user = f'[{index+1}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面


@@ -1,8 +1,11 @@
from toolbox import CatchException, update_ui, report_exception
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
import datetime
from crazy_functions.plugin_template.plugin_class_template import (
GptAcademicPluginTemplate,
)
from crazy_functions.plugin_template.plugin_class_template import ArgProperty
#以下是每类图表的PROMPT
# 以下是每类图表的PROMPT
SELECT_PROMPT = """
{subject}
=============
@@ -17,22 +20,24 @@ SELECT_PROMPT = """
8 象限提示图
不需要解释原因,仅需要输出单个不带任何标点符号的数字。
"""
#没有思维导图!!!测试发现模型始终会优先选择思维导图
#流程图
# 没有思维导图!!!测试发现模型始终会优先选择思维导图
# 流程图
PROMPT_1 = """
请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例
请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,注意需要使用双引号将内容括起来。
mermaid语法举例
```mermaid
graph TD
P(编程) --> L1(Python)
P(编程) --> L2(C)
P(编程) --> L3(C++)
P(编程) --> L4(Javascipt)
P(编程) --> L5(PHP)
P("编程") --> L1("Python")
P("编程") --> L2("C")
P("编程") --> L3("C++")
P("编程") --> L4("Javascipt")
P("编程") --> L5("PHP")
```
"""
#序列图
# 序列图
PROMPT_2 = """
请你给出围绕“{subject}”的序列图,使用mermaid语法,mermaid语法举例
请你给出围绕“{subject}”的序列图,使用mermaid语法
mermaid语法举例
```mermaid
sequenceDiagram
participant A as 用户
@@ -43,9 +48,10 @@ sequenceDiagram
B->>A: 返回数据
```
"""
#类图
# 类图
PROMPT_3 = """
请你给出围绕“{subject}”的类图,使用mermaid语法,mermaid语法举例
请你给出围绕“{subject}”的类图,使用mermaid语法
mermaid语法举例
```mermaid
classDiagram
Class01 <|-- AveryLongClass : Cool
@@ -63,9 +69,10 @@ classDiagram
Class08 <--> C2: Cool label
```
"""
#饼图
# 饼图
PROMPT_4 = """
请你给出围绕“{subject}”的饼图,使用mermaid语法,mermaid语法举例
请你给出围绕“{subject}”的饼图,使用mermaid语法,注意需要使用双引号将内容括起来。
mermaid语法举例
```mermaid
pie title Pets adopted by volunteers
"" : 386
@@ -73,38 +80,41 @@ pie title Pets adopted by volunteers
"兔子" : 15
```
"""
#甘特图
# 甘特图
PROMPT_5 = """
请你给出围绕“{subject}”的甘特图,使用mermaid语法,mermaid语法举例
请你给出围绕“{subject}”的甘特图,使用mermaid语法,注意需要使用双引号将内容括起来。
mermaid语法举例
```mermaid
gantt
title 项目开发流程
title "项目开发流程"
dateFormat YYYY-MM-DD
section 设计
需求分析 :done, des1, 2024-01-06,2024-01-08
原型设计 :active, des2, 2024-01-09, 3d
UI设计 : des3, after des2, 5d
section 开发
前端开发 :2024-01-20, 10d
后端开发 :2024-01-20, 10d
section "设计"
"需求分析" :done, des1, 2024-01-06,2024-01-08
"原型设计" :active, des2, 2024-01-09, 3d
"UI设计" : des3, after des2, 5d
section "开发"
"前端开发" :2024-01-20, 10d
"后端开发" :2024-01-20, 10d
```
"""
#状态图
# 状态图
PROMPT_6 = """
请你给出围绕“{subject}”的状态图,使用mermaid语法,mermaid语法举例
请你给出围绕“{subject}”的状态图,使用mermaid语法,注意需要使用双引号将内容括起来。
mermaid语法举例
```mermaid
stateDiagram-v2
[*] --> Still
Still --> [*]
Still --> Moving
Moving --> Still
Moving --> Crash
Crash --> [*]
[*] --> "Still"
"Still" --> [*]
"Still" --> "Moving"
"Moving" --> "Still"
"Moving" --> "Crash"
"Crash" --> [*]
```
"""
#实体关系图
# 实体关系图
PROMPT_7 = """
请你给出围绕“{subject}”的实体关系图,使用mermaid语法,mermaid语法举例
请你给出围绕“{subject}”的实体关系图,使用mermaid语法
mermaid语法举例
```mermaid
erDiagram
CUSTOMER ||--o{ ORDER : places
@@ -124,118 +134,173 @@ erDiagram
}
```
"""
#象限提示图
# 象限提示图
PROMPT_8 = """
请你给出围绕“{subject}”的象限图,使用mermaid语法,mermaid语法举例
请你给出围绕“{subject}”的象限图,使用mermaid语法,注意需要使用双引号将内容括起来。
mermaid语法举例
```mermaid
graph LR
A[Hard skill] --> B(Programming)
A[Hard skill] --> C(Design)
D[Soft skill] --> E(Coordination)
D[Soft skill] --> F(Communication)
A["Hard skill"] --> B("Programming")
A["Hard skill"] --> C("Design")
D["Soft skill"] --> E("Coordination")
D["Soft skill"] --> F("Communication")
```
"""
#思维导图
# 思维导图
PROMPT_9 = """
{subject}
==========
请给出上方内容的思维导图,充分考虑其之间的逻辑,使用mermaid语法,mermaid语法举例
请给出上方内容的思维导图,充分考虑其之间的逻辑,使用mermaid语法,注意需要使用双引号将内容括起来。
mermaid语法举例
```mermaid
mindmap
root((mindmap))
Origins
Long history
("Origins")
("Long history")
::icon(fa fa-book)
Popularisation
British popular psychology author Tony Buzan
Research
On effectiveness<br/>and features
On Automatic creation
Uses
Creative techniques
Strategic planning
Argument mapping
Tools
Pen and paper
Mermaid
("Popularisation")
("British popular psychology author Tony Buzan")
::icon(fa fa-user)
("Research")
("On effectiveness<br/>and features")
::icon(fa fa-search)
("On Automatic creation")
::icon(fa fa-robot)
("Uses")
("Creative techniques")
::icon(fa fa-lightbulb-o)
("Strategic planning")
::icon(fa fa-flag)
("Argument mapping")
::icon(fa fa-comments)
("Tools")
("Pen and paper")
::icon(fa fa-pencil)
("Mermaid")
::icon(fa fa-code)
```
"""
def 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs):
def 解析历史输入(history, llm_kwargs, file_manifest, chatbot, plugin_kwargs):
############################## <第 0 步,切割输入> ##################################
# 借用PDF切割中的函数对文本进行切割
TOKEN_LIMIT_PER_FRAGMENT = 2500
txt = str(history).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
txt = (
str(history).encode("utf-8", "ignore").decode()
) # avoid reading non-utf8 chars
from crazy_functions.pdf_fns.breakdown_txt import (
breakdown_text_to_satisfy_token_limit,
)
txt = breakdown_text_to_satisfy_token_limit(
txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs["llm_model"]
)
############################## <第 1 步,迭代地历遍整个文章,提取精炼信息> ##################################
results = []
MAX_WORD_TOTAL = 4096
n_txt = len(txt)
last_iteration_result = "从以下文本中提取摘要。"
if n_txt >= 20: print('文章极长,不能达到预期效果')
if n_txt >= 20:
print("文章极长,不能达到预期效果")
for i in range(n_txt):
NUM_OF_WORD = MAX_WORD_TOTAL // n_txt
i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words in Chinese: {txt[i]}"
i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...."
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
llm_kwargs, chatbot,
history=["The main content of the previous section is?", last_iteration_result], # 迭代上一次的结果
sys_prompt="Extracts the main content from the text section where it is located for graphing purposes, answer me with Chinese." # 提示
)
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
i_say,
i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
llm_kwargs,
chatbot,
history=[
"The main content of the previous section is?",
last_iteration_result,
], # 迭代上一次的结果
sys_prompt="Extracts the main content from the text section where it is located for graphing purposes, answer me with Chinese.", # 提示
)
results.append(gpt_say)
last_iteration_result = gpt_say
############################## <第 2 步,根据整理的摘要选择图表类型> ##################################
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
gpt_say = plugin_kwargs.get("advanced_arg", "") #将图表类型参数赋值为插件参数
results_txt = '\n'.join(results) #合并摘要
if gpt_say not in ['1','2','3','4','5','6','7','8','9']: #如插件参数不正确则使用对话模型判断
i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。" # 用户提示
chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI
gpt_say = str(plugin_kwargs) # 将图表类型参数赋值为插件参数
results_txt = "\n".join(results) # 合并摘要
if gpt_say not in [
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]: # 如插件参数不正确则使用对话模型判断
i_say_show_user = (
f"接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制"
)
gpt_say = "[Local Message] 收到。" # 用户提示
chatbot.append([i_say_show_user, gpt_say])
yield from update_ui(chatbot=chatbot, history=[]) # 更新UI
i_say = SELECT_PROMPT.format(subject=results_txt)
i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图。由于不管提供文本是什么,模型大概率认为"思维导图"最合适,因此思维导图仅能通过参数调用。'
for i in range(3):
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say,
inputs_show_user=i_say_show_user,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
sys_prompt=""
llm_kwargs=llm_kwargs,
chatbot=chatbot,
history=[],
sys_prompt="",
)
if gpt_say in ['1','2','3','4','5','6','7','8','9']: #判断返回是否正确
if gpt_say in [
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]: # 判断返回是否正确
break
if gpt_say not in ['1','2','3','4','5','6','7','8','9']:
gpt_say = '1'
if gpt_say not in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
gpt_say = "1"
############################## <第 3 步,根据选择的图表类型绘制图表> ##################################
if gpt_say == '1':
if gpt_say == "1":
i_say = PROMPT_1.format(subject=results_txt)
elif gpt_say == '2':
elif gpt_say == "2":
i_say = PROMPT_2.format(subject=results_txt)
elif gpt_say == '3':
elif gpt_say == "3":
i_say = PROMPT_3.format(subject=results_txt)
elif gpt_say == '4':
elif gpt_say == "4":
i_say = PROMPT_4.format(subject=results_txt)
elif gpt_say == '5':
elif gpt_say == "5":
i_say = PROMPT_5.format(subject=results_txt)
elif gpt_say == '6':
elif gpt_say == "6":
i_say = PROMPT_6.format(subject=results_txt)
elif gpt_say == '7':
i_say = PROMPT_7.replace("{subject}", results_txt) #由于实体关系图用到了{}符号
elif gpt_say == '8':
elif gpt_say == "7":
i_say = PROMPT_7.replace("{subject}", results_txt) # 由于实体关系图用到了{}符号
elif gpt_say == "8":
i_say = PROMPT_8.format(subject=results_txt)
elif gpt_say == '9':
elif gpt_say == "9":
i_say = PROMPT_9.format(subject=results_txt)
i_say_show_user = f'请根据判断结果绘制相应的图表。如需绘制思维导图请使用参数调用,同时过大的图表可能需要复制到在线编辑器中进行渲染。'
i_say_show_user = f"请根据判断结果绘制相应的图表。如需绘制思维导图请使用参数调用,同时过大的图表可能需要复制到在线编辑器中进行渲染。"
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say,
inputs_show_user=i_say_show_user,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
sys_prompt=""
llm_kwargs=llm_kwargs,
chatbot=chatbot,
history=[],
sys_prompt="",
)
history.append(gpt_say)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
@CatchException
def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def 生成多种Mermaid图表(
txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port
):
"""
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -248,15 +313,21 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history,
import os
# 基本信息:功能、贡献者
chatbot.append([
"函数插件功能?",
"根据当前聊天历史或指定的路径文件(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\
\n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
chatbot.append(
[
"函数插件功能?",
"根据当前聊天历史或指定的路径文件(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\
\n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918",
]
)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
if os.path.exists(txt): #如输入区无内容则直接解析历史记录
if os.path.exists(txt): # 如输入区无内容则直接解析历史记录
from crazy_functions.pdf_fns.parse_word import extract_text_from_files
file_exist, final_result, page_one, file_manifest, excption = extract_text_from_files(txt, chatbot, history)
file_exist, final_result, page_one, file_manifest, excption = (
extract_text_from_files(txt, chatbot, history)
)
else:
file_exist = False
excption = ""
@@ -264,33 +335,104 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history,
if excption != "":
if excption == "word":
report_exception(chatbot, history,
a = f"解析项目: {txt}",
b = f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。")
report_exception(
chatbot,
history,
a=f"解析项目: {txt}",
b=f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。",
)
elif excption == "pdf":
report_exception(chatbot, history,
a = f"解析项目: {txt}",
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
report_exception(
chatbot,
history,
a=f"解析项目: {txt}",
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。",
)
elif excption == "word_pip":
report_exception(chatbot, history,
a=f"解析项目: {txt}",
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")
report_exception(
chatbot,
history,
a=f"解析项目: {txt}",
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。",
)
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
else:
if not file_exist:
history.append(txt) #如输入区不是文件则将输入区内容加入历史记录
i_say_show_user = f'首先你从历史记录中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示
chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI
yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs)
history.append(txt) # 如输入区不是文件则将输入区内容加入历史记录
i_say_show_user = f"首先你从历史记录中提取摘要。"
gpt_say = "[Local Message] 收到。" # 用户提示
chatbot.append([i_say_show_user, gpt_say])
yield from update_ui(chatbot=chatbot, history=history) # 更新UI
yield from 解析历史输入(
history, llm_kwargs, file_manifest, chatbot, plugin_kwargs
)
else:
file_num = len(file_manifest)
for i in range(file_num): #依次处理文件
i_say_show_user = f"[{i+1}/{file_num}]处理文件{file_manifest[i]}"; gpt_say = "[Local Message] 收到。" # 用户提示
chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI
history = [] #如输入区内容为文件则清空历史记录
for i in range(file_num): # 依次处理文件
i_say_show_user = f"[{i+1}/{file_num}]处理文件{file_manifest[i]}"
gpt_say = "[Local Message] 收到。" # 用户提示
chatbot.append([i_say_show_user, gpt_say])
yield from update_ui(chatbot=chatbot, history=history) # 更新UI
history = [] # 如输入区内容为文件则清空历史记录
history.append(final_result[i])
yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs)
yield from 解析历史输入(
history, llm_kwargs, file_manifest, chatbot, plugin_kwargs
)
class Mermaid_Gen(GptAcademicPluginTemplate):
def __init__(self):
pass
def define_arg_selection_menu(self):
gui_definition = {
"Type_of_Mermaid": ArgProperty(
title="绘制的Mermaid图表类型",
options=[
"由LLM决定",
"流程图",
"序列图",
"类图",
"饼图",
"甘特图",
"状态图",
"实体关系图",
"象限提示图",
"思维导图",
],
default_value="由LLM决定",
description="选择'由LLM决定'时将由对话模型判断适合的图表类型(不包括思维导图),选择其他类型时将直接绘制指定的图表类型。",
type="dropdown",
).model_dump_json(),
}
return gui_definition
def execute(
txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request
):
options = [
"由LLM决定",
"流程图",
"序列图",
"类图",
"饼图",
"甘特图",
"状态图",
"实体关系图",
"象限提示图",
"思维导图",
]
plugin_kwargs = options.index(plugin_kwargs['Type_of_Mermaid'])
yield from 生成多种Mermaid图表(
txt,
llm_kwargs,
plugin_kwargs,
chatbot,
history,
system_prompt,
user_request,
)
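
Note how `execute` re-purposes `plugin_kwargs`: the chosen dropdown label is converted into its positional index, which `解析历史输入` later reads back via `str(plugin_kwargs)`, so "0" (由LLM决定) falls through to model-based chart selection while "1" to "9" pick a prompt directly. A minimal illustration:

```python
options = ["由LLM决定", "流程图", "序列图", "类图"]
chart_type = str(options.index("序列图"))
print(chart_type)  # "2" -> PROMPT_2 (sequence diagram)
```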


@@ -13,7 +13,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面


@@ -2,6 +2,10 @@ from toolbox import CatchException, update_ui
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
import datetime
####################################################################################################################
# Demo 1: 一个非常简单的插件 #########################################################################################
####################################################################################################################
高阶功能模板函数示意图 = f"""
```mermaid
flowchart TD
@@ -26,7 +30,7 @@ flowchart TD
"""
@CatchException
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request, num_day=5):
"""
# 高阶功能模板函数示意图https://mermaid.live/edit#pako:eNptk1tvEkEYhv8KmattQpvlvOyFCcdeeaVXuoYssBwie8gyhCIlqVoLhrbbtAWNUpEGUkyMEDW2Fmn_DDOL_8LZHdOwxrnamX3f7_3mmZk6yKhZCfAgV1KrmYKoQ9fDuKC4yChX0nld1Aou1JzjznQ5fWmejh8LYHW6vG2a47YAnlCLNSIRolnenKBXI_zRIBrcuqRT890u7jZx7zMDt-AaMbnW1--5olGiz2sQjwfoQxsZL0hxplSSU0-rop4vrzmKR6O2JxYjHmwcL2Y_HDatVMkXlf86YzHbGY9bO5j8XE7O8Nsbc3iNB3ukL2SMcH-XIQBgWoVOZzxuOxOJOyc63EPGV6ZQLENVrznViYStTiaJ2vw2M2d9bByRnOXkgCnXylCSU5quyto_IcmkbdvctELmJ-j1ASW3uB3g5xOmKqVTmqr_Na3AtuS_dtBFm8H90XJyHkDDT7S9xXWb4HGmRChx64AOL5HRpUm411rM5uh4H78Z4V7fCZzytjZz2seto9XaNPFue07clLaVZF8UNLygJ-VES8lah_n-O-5Ozc7-77NzJ0-K0yr0ZYrmHdqAk50t2RbA4qq9uNohBASw7YpSgaRkLWCCAtxAlnRZLGbJba9bPwUAC5IsCYAnn1kpJ1ZKUACC0iBSsQLVBzUlA3ioVyQ3qGhZEUrxokiehAz4nFgqk1VNVABfB1uAD_g2_AGPl-W8nMcbCvsDblADfNCz4feyobDPy3rYEMtxwYYbPFNVUoHdCPmDHBv2cP4AMfrCbiBli-Q-3afv0X6WdsIjW2-10fgDy1SAig
@@ -43,7 +47,7 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
"您正在调用插件:历史上的今天",
"[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板该函数只有20多行代码。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR" + 高阶功能模板函数示意图))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
for i in range(5):
for i in range(int(num_day)):
currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day
i_say = f'历史中哪些事件发生在{currentMonth}{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。'
@@ -59,6 +63,56 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
####################################################################################################################
# Demo 2: 一个带二级菜单的插件 #######################################################################################
####################################################################################################################
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
class Demo_Wrap(GptAcademicPluginTemplate):
def __init__(self):
"""
请注意`execute`会执行在不同的线程中,因此您在定义和使用类变量时,应当慎之又慎!
"""
pass
def define_arg_selection_menu(self):
"""
定义插件的二级选项菜单
"""
gui_definition = {
"num_day":
ArgProperty(title="日期选择", options=["仅今天", "未来3天", "未来5天"], default_value="未来3天", description="", type="dropdown").model_dump_json(),
}
return gui_definition
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
执行插件
"""
num_day = plugin_kwargs["num_day"]
if num_day == "仅今天": num_day = 1
if num_day == "未来3天": num_day = 3
if num_day == "未来5天": num_day = 5
yield from 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request, num_day=num_day)
####################################################################################################################
# Demo 3: 绘制脑图的Demo ############################################################################################
####################################################################################################################
PROMPT = """
请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例
```mermaid


@@ -3,6 +3,9 @@
# 从NVIDIA源,从而支持显卡检查宿主的nvidia-smi中的cuda版本必须>=11.3
FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest
# edge-tts需要的依赖,某些pip包所需的依赖
RUN apt update && apt install ffmpeg build-essential -y
# use python3 as the system default python
WORKDIR /gpt
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8


@@ -5,6 +5,9 @@
# 从NVIDIA源,从而支持显卡检查宿主的nvidia-smi中的cuda版本必须>=11.3
FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest
# edge-tts需要的依赖,某些pip包所需的依赖
RUN apt update && apt install ffmpeg build-essential -y
# use python3 as the system default python
WORKDIR /gpt
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
@@ -36,6 +39,7 @@ RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt
RUN python3 -m pip install nougat-ocr
# 预热Tiktoken模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'


@@ -5,6 +5,8 @@ RUN apt-get update
RUN apt-get install -y curl proxychains curl gcc
RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
# edge-tts需要的依赖,某些pip包所需的依赖
RUN apt update && apt install ffmpeg build-essential -y
# use python3 as the system default python
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
@@ -22,7 +24,6 @@ RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt
# 预热Tiktoken模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'


@@ -23,6 +23,9 @@ RUN python3 -m pip install -r request_llms/requirements_jittorllms.txt -i https:
# 下载JittorLLMs
RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llms/jittorllms
# edge-tts需要的依赖
RUN apt update && apt install ffmpeg -y
# 禁用缓存,确保更新代码
ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache
RUN git pull


@@ -12,6 +12,8 @@ COPY . .
# 安装依赖
RUN pip3 install -r requirements.txt
# edge-tts需要的依赖
RUN apt update && apt install ffmpeg -y
# 可选步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'


@@ -15,6 +15,9 @@ RUN pip3 install -r requirements.txt
# 安装语音插件的额外依赖
RUN pip3 install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
# edge-tts需要的依赖
RUN apt update && apt install ffmpeg -y
# 可选步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'


@@ -25,6 +25,9 @@ COPY . .
# 安装依赖
RUN pip3 install -r requirements.txt
# edge-tts需要的依赖
RUN apt update && apt install ffmpeg -y
# 可选步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'


@@ -19,6 +19,9 @@ RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cp
RUN pip3 install unstructured[all-docs] --upgrade
RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'
# edge-tts需要的依赖
RUN apt update && apt install ffmpeg -y
# 可选步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'


@@ -0,0 +1,189 @@
# 实现带二级菜单的插件
## 一、如何写带有二级菜单的插件
1. 声明一个 `Class`,继承父类 `GptAcademicPluginTemplate`
```python
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate
from crazy_functions.plugin_template.plugin_class_template import ArgProperty
class Demo_Wrap(GptAcademicPluginTemplate):
def __init__(self): ...
```
2. 声明二级菜单中需要的变量,覆盖父类的`define_arg_selection_menu`函数。
```python
class Demo_Wrap(GptAcademicPluginTemplate):
...
def define_arg_selection_menu(self):
"""
定义插件的二级选项菜单
第一个参数,名称`main_input`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description`,`default_value`为默认值;
第二个参数,名称`advanced_arg`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description`,`default_value`为默认值;
第三个参数,名称`allow_cache`,参数`type`声明这是一个下拉菜单,下拉菜单上方显示`title`+`description`,下拉菜单的选项为`options`,`default_value`为下拉菜单默认值;
"""
gui_definition = {
"main_input":
ArgProperty(title="ArxivID", description="输入Arxiv的ID或者网址", default_value="", type="string").model_dump_json(),
"advanced_arg":
ArgProperty(title="额外的翻译提示词",
description=r"如果有必要, 请在此处给出自定义翻译命令",
default_value="", type="string").model_dump_json(),
"allow_cache":
ArgProperty(title="是否允许从缓存中调取结果", options=["允许缓存", "从头执行"], default_value="允许缓存", description="无", type="dropdown").model_dump_json(),
}
return gui_definition
...
```
> [!IMPORTANT]
>
> ArgProperty 中每个条目对应一个参数:`type == "string"`时,使用文本框;`type == "dropdown"`时,使用下拉菜单。
>
> 注意:`main_input` 和 `advanced_arg`是两个特殊的参数。`main_input`会自动与界面右上角的`输入区`进行同步,而`advanced_arg`会自动与界面右下角的`高级参数输入区`同步。除此之外,参数名称可以任意选取。其他细节详见`crazy_functions/plugin_template/plugin_class_template.py`。
3. 编写插件程序,覆盖父类的`execute`函数。
例如:
```python
class Demo_Wrap(GptAcademicPluginTemplate):
...
...
def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
"""
执行插件
plugin_kwargs字典中会包含用户的选择,与上述 `define_arg_selection_menu` 一一对应
"""
allow_cache = plugin_kwargs["allow_cache"]
advanced_arg = plugin_kwargs["advanced_arg"]
if allow_cache == "从头执行": plugin_kwargs["advanced_arg"] = "--no-cache " + plugin_kwargs["advanced_arg"]
yield from Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
```
4. 注册插件
将以下条目插入`crazy_functional.py`即可。注意,与旧插件不同的是,`Function`键值应该为None,而`Class`键值为上述插件的类名称(`Demo_Wrap`)。
```
"新插件": {
"Group": "学术",
"Color": "stop",
"AsButton": True,
"Info": "插件说明",
"Function": None,
"Class": Demo_Wrap,
},
```
5. 已经结束了,启动程序测试吧~
## 二、背后的原理(需要JavaScript的前置知识)
### (I) 首先介绍三个Gradio官方没有的重要前端函数
主javascript程序`common.js`中有三个Gradio官方没有的重要API:
1. `get_data_from_gradio_component`
这个函数可以获取任意gradio组件的当前值,例如textbox中的字符,dropdown中的当前选项,chatbot当前的对话等等。调用方法举例:
```javascript
// 获取当前的对话
let chatbot = await get_data_from_gradio_component('gpt-chatbot');
```
2. `get_gradio_component`
有时候我们不仅需要gradio组件的当前值,还需要它的label值、是否隐藏、下拉菜单的其他可选选项等等,而通过这个函数可以直接获取这个组件的句柄。举例:
```javascript
// 获取下拉菜单组件的句柄
var model_sel = await get_gradio_component("elem_model_sel");
// 获取它的所有属性,包括其所有可选选项
console.log(model_sel.props)
```
3. `push_data_to_gradio_component`
这个函数可以将数据推回gradio组件,例如textbox中的字符,dropdown中的当前选项等等。调用方法举例:
```javascript
// 修改一个按钮上面的文本
push_data_to_gradio_component("btnName", "gradio_element_id", "string");
// 隐藏一个组件
push_data_to_gradio_component({ visible: false, __type__: 'update' }, "plugin_arg_menu", "obj");
// 修改组件label
push_data_to_gradio_component({ label: '新label的值', __type__: 'update' }, "gpt-chatbot", "obj")
// 第一个参数是value:
// - 可以是字符串(调整textbox的文本、按钮的文本)
// - 还可以是 { visible: false, __type__: 'update' } 这样的字典(调整visible、label、choices等)
// 第二个参数是elem_id
// 第三个参数是"string" 或者 "obj"
```
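顺带说明:`{ visible: false, __type__: 'update' }` 这类“更新字典”与后端Gradio的`gr.update(...)`约定同构。下面是一个极简的Python示意(基于Gradio 3.x的行为,仅作参考):
```python
import gradio as gr  # 项目内置 Gradio 3.x

# gr.update(...) 在后端生成“更新字典”:包含 __type__ 键以及待更新的属性
upd = gr.update(visible=False, label="新label的值")
print("__type__" in upd, upd["visible"], upd["label"])  # True False 新label的值
```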
### (II) 从点击插件到执行插件的逻辑过程
简述:程序启动时,把每个插件的二级菜单编码为BASE64,存储在用户的浏览器前端;用户调用对应功能时,会按照插件的BASE64编码,将平时隐藏的菜单有选择性地显示出来。
1. 启动阶段(主函数 `main.py` 中),遍历每个插件,生成二级菜单的BASE64编码,存入变量`register_advanced_plugin_init_code_arr`。
```python
def get_js_code_for_generating_menu(self, btnName):
define_arg_selection = self.define_arg_selection_menu()
DEFINE_ARG_INPUT_INTERFACE = json.dumps(define_arg_selection)
return base64.b64encode(DEFINE_ARG_INPUT_INTERFACE.encode('utf-8')).decode('utf-8')
```
2. 用户加载阶段(主javascript程序`common.js`中),浏览器加载`register_advanced_plugin_init_code_arr`,存入本地的字典`advanced_plugin_init_code_lib`:
```javascript
advanced_plugin_init_code_lib = {}
function register_advanced_plugin_init_code(key, code){
advanced_plugin_init_code_lib[key] = code;
}
```
3. 用户点击插件按钮(主函数 `main.py` 中)时,仅执行以下javascript代码,唤醒隐藏的二级菜单(生成菜单的代码在`common.js`中的`generate_menu`函数上):
```javascript
// 生成高级插件的选择菜单
function run_advanced_plugin_launch_code(key){
generate_menu(advanced_plugin_init_code_lib[key], key);
}
function on_flex_button_click(key){
run_advanced_plugin_launch_code(key);
}
```
```python
click_handle = plugins[k]["Button"].click(None, inputs=[], outputs=None, _js=f"""()=>run_advanced_plugin_launch_code("{k}")""")
```
4. 当用户点击二级菜单的执行键时,通过javascript脚本模拟点击一个隐藏按钮,触发后续程序(`common.js`中的`execute_current_pop_up_plugin`会把二级菜单中的参数缓存到`invisible_current_pop_up_plugin_arg_final`,然后模拟点击`invisible_callback_btn_for_plugin_exe`按钮)。隐藏按钮的定义在主函数 `main.py` 中,该隐藏按钮会最终触发`route_switchy_bt_with_arg`函数(定义于`themes/gui_advanced_plugin_class.py`):
```python
click_handle_ng = new_plugin_callback.click(route_switchy_bt_with_arg, [
gr.State(["new_plugin_callback", "usr_confirmed_arg"] + input_combo_order),
new_plugin_callback, usr_confirmed_arg, *input_combo
], output_combo)
```
5. 最后,`route_switchy_bt_with_arg`中,会搜集所有用户参数,统一集中到`plugin_kwargs`参数中,并执行对应插件的`execute`函数。
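下面给出一个极简的Python示意(并非`themes/gui_advanced_plugin_class.py`的真实实现,函数与字段均为演示假设),帮助理解“按参数名重组`plugin_kwargs`,再调用插件`execute`”这一步:
```python
import json

class DemoPlugin:
    """演示用的插件类(假设),只关心 plugin_kwargs 的流向"""
    def execute(self, txt, plugin_kwargs):
        yield f"main_input={txt}, allow_cache={plugin_kwargs['allow_cache']}"

def demo_route_with_arg(arg_order, *args, plugins):
    """示意:把位置参数按 arg_order 命名,组装 plugin_kwargs,再调用对应插件的 execute"""
    named = dict(zip(arg_order, args))
    plugin_kwargs = json.loads(named["usr_confirmed_arg"])  # 用户在二级菜单中确认的参数(此处假设为JSON字符串)
    plugin = plugins[named["plugin_name"]]["Class"]()       # 取出注册时填写的 Class 并实例化
    yield from plugin.execute(named["txt"], plugin_kwargs)

plugins = {"Demo_Wrap": {"Class": DemoPlugin}}
for msg in demo_route_with_arg(
        ["plugin_name", "usr_confirmed_arg", "txt"],  # 指定后续参数的名称(对应上文第一个 gr.State)
        "Demo_Wrap", json.dumps({"allow_cache": "允许缓存"}), "2403.00001",
        plugins=plugins):
    print(msg)  # main_input=2403.00001, allow_cache=允许缓存
```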


@@ -22,13 +22,13 @@
| crazy_functions\下载arxiv论文翻译摘要.py | 下载 `arxiv` 论文的 PDF 文件,并提取摘要和翻译 |
| crazy_functions\代码重写为全英文_多线程.py | 将Python源代码文件中的中文内容转化为英文 |
| crazy_functions\图片生成.py | 根据激励文本使用GPT模型生成相应的图像 |
| crazy_functions\对话历史存档.py | 将每次对话记录写入Markdown格式的文件中 |
| crazy_functions\Conversation_To_File.py | 将每次对话记录写入Markdown格式的文件中 |
| crazy_functions\总结word文档.py | 对输入的word文档进行摘要生成 |
| crazy_functions\总结音视频.py | 对输入的音视频文件进行摘要生成 |
| crazy_functions\批量Markdown翻译.py | 将指定目录下的Markdown文件进行中英文翻译 |
| crazy_functions\Markdown_Translate.py | 将指定目录下的Markdown文件进行中英文翻译 |
| crazy_functions\批量总结PDF文档.py | 对PDF文件进行切割和摘要生成 |
| crazy_functions\批量总结PDF文档pdfminer.py | 对PDF文件进行文本内容的提取和摘要生成 |
| crazy_functions\批量翻译PDF文档_多线程.py | 将指定目录下的PDF文件进行中英文翻译 |
| crazy_functions\PDF_Translate.py | 将指定目录下的PDF文件进行中英文翻译 |
| crazy_functions\理解PDF文档内容.py | 对PDF文件进行摘要生成和问题解答 |
| crazy_functions\生成函数注释.py | 自动生成Python函数的注释 |
| crazy_functions\联网的ChatGPT.py | 使用网络爬虫和ChatGPT模型进行聊天回答 |
@@ -155,9 +155,9 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和
该程序文件提供了一个用于生成图像的函数`图片生成`。函数实现的过程中,会调用`gen_image`函数来生成图像,并返回图像生成的网址和本地文件地址。函数有多个参数,包括`prompt`(激励文本)、`llm_kwargs`(GPT模型的参数)、`plugin_kwargs`(插件模型的参数)等。函数核心代码使用了`requests`库向OpenAI API请求图像,并做了简单的处理和保存。函数还更新了交互界面,清空聊天历史并显示正在生成图像的消息和最终的图像网址和预览。
## [18/48] 请对下面的程序文件做一个概述: crazy_functions\对话历史存档.py
## [18/48] 请对下面的程序文件做一个概述: crazy_functions\Conversation_To_File.py
这个文件是名为crazy_functions\对话历史存档.py的Python程序文件,包含了4个函数:
这个文件是名为crazy_functions\Conversation_To_File.py的Python程序文件,包含了4个函数:
1. write_chat_to_file(chatbot, history=None, file_name=None):用来将对话记录以Markdown格式写入文件中,并且生成文件名,如果没指定文件名则用当前时间。写入完成后将文件路径打印出来。
@@ -165,7 +165,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和
3. read_file_to_chat(chatbot, history, file_name):从传入的文件中读取内容,解析出对话历史记录并更新聊天显示框。
4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):一个主要函数,用于保存当前对话记录并提醒用户。如果用户希望加载历史记录,则调用read_file_to_chat()来更新聊天显示框。如果用户希望删除历史记录,调用删除所有本地对话历史记录()函数完成删除操作。
4. Conversation_To_File(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):一个主要函数,用于保存当前对话记录并提醒用户。如果用户希望加载历史记录,则调用read_file_to_chat()来更新聊天显示框。如果用户希望删除历史记录,调用删除所有本地对话历史记录()函数完成删除操作。
## [19/48] 请对下面的程序文件做一个概述: crazy_functions\总结word文档.py
@@ -175,9 +175,9 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和
该程序文件包括两个函数split_audio_file()和AnalyAudio(),并且导入了一些必要的库并定义了一些工具函数。split_audio_file用于将音频文件分割成多个时长相等的片段,返回一个包含所有切割音频片段文件路径的列表,而AnalyAudio用来分析音频文件,通过调用whisper模型进行音频转文字并使用GPT模型对音频内容进行概述,最终将所有总结结果写入结果文件中。
## [21/48] 请对下面的程序文件做一个概述: crazy_functions\批量Markdown翻译.py
## [21/48] 请对下面的程序文件做一个概述: crazy_functions\Markdown_Translate.py
该程序文件名为`批量Markdown翻译.py`,包含了以下功能:读取Markdown文件,将长文本分离开来,将Markdown文件进行翻译(英译中和中译英),整理结果并退出。程序使用了多线程以提高效率。程序使用了`tiktoken`依赖库,可能需要额外安装。文件中还有一些其他的函数和类,但与文件名所描述的功能无关。
该程序文件名为`Markdown_Translate.py`,包含了以下功能:读取Markdown文件,将长文本分离开来,将Markdown文件进行翻译(英译中和中译英),整理结果并退出。程序使用了多线程以提高效率。程序使用了`tiktoken`依赖库,可能需要额外安装。文件中还有一些其他的函数和类,但与文件名所描述的功能无关。
## [22/48] 请对下面的程序文件做一个概述: crazy_functions\批量总结PDF文档.py
@@ -187,9 +187,9 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和
该程序文件是一个用于批量总结PDF文档的函数插件,使用了pdfminer插件和BeautifulSoup库来提取PDF文档的文本内容,对每个PDF文件分别进行处理并生成中英文摘要。同时,该程序文件还包括一些辅助工具函数和处理异常的装饰器。
## [24/48] 请对下面的程序文件做一个概述: crazy_functions\批量翻译PDF文档_多线程.py
## [24/48] 请对下面的程序文件做一个概述: crazy_functions\PDF_Translate.py
这个程序文件是一个Python脚本,文件名为“批量翻译PDF文档_多线程.py”。它主要使用了“toolbox”、“request_gpt_model_in_new_thread_with_ui_alive”、“request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency”、“colorful”等Python库和自定义的模块“crazy_utils”的一些函数。程序实现了一个批量翻译PDF文档的功能,可以自动解析PDF文件中的基础信息,递归地切割PDF文件,翻译和处理PDF论文中的所有内容,并生成相应的翻译结果文件(包括md文件和html文件)。功能比较复杂,其中需要调用多个函数和依赖库,涉及到多线程操作和UI更新。文件中有详细的注释和变量命名,代码比较清晰易读。
这个程序文件是一个Python脚本,文件名为“PDF_Translate.py”。它主要使用了“toolbox”、“request_gpt_model_in_new_thread_with_ui_alive”、“request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency”、“colorful”等Python库和自定义的模块“crazy_utils”的一些函数。程序实现了一个批量翻译PDF文档的功能,可以自动解析PDF文件中的基础信息,递归地切割PDF文件,翻译和处理PDF论文中的所有内容,并生成相应的翻译结果文件(包括md文件和html文件)。功能比较复杂,其中需要调用多个函数和依赖库,涉及到多线程操作和UI更新。文件中有详细的注释和变量命名,代码比较清晰易读。
## [25/48] 请对下面的程序文件做一个概述: crazy_functions\理解PDF文档内容.py
@@ -331,19 +331,19 @@ check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, c
这些程序源文件提供了基础的文本和语言处理功能、工具函数和高级插件,使 Chatbot 能够处理各种复杂的学术文本问题,包括润色、翻译、搜索、下载、解析等。
## 用一张Markdown表格简要描述以下文件的功能
crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生成.py, crazy_functions\对话历史存档.py, crazy_functions\总结word文档.py, crazy_functions\总结音视频.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\批量翻译PDF文档_多线程.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\联网的ChatGPT.py, crazy_functions\解析JupyterNotebook.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py。根据以上分析,用一句话概括程序的整体功能。
crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生成.py, crazy_functions\Conversation_To_File.py, crazy_functions\总结word文档.py, crazy_functions\总结音视频.py, crazy_functions\Markdown_Translate.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\PDF_Translate.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\联网的ChatGPT.py, crazy_functions\解析JupyterNotebook.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py。根据以上分析,用一句话概括程序的整体功能。
| 文件名 | 功能简述 |
| --- | --- |
| 代码重写为全英文_多线程.py | 将Python源代码文件中的中文内容转化为英文 |
| 图片生成.py | 根据激励文本使用GPT模型生成相应的图像 |
| 对话历史存档.py | 将每次对话记录写入Markdown格式的文件中 |
| Conversation_To_File.py | 将每次对话记录写入Markdown格式的文件中 |
| 总结word文档.py | 对输入的word文档进行摘要生成 |
| 总结音视频.py | 对输入的音视频文件进行摘要生成 |
| 批量Markdown翻译.py | 将指定目录下的Markdown文件进行中英文翻译 |
| Markdown_Translate.py | 将指定目录下的Markdown文件进行中英文翻译 |
| 批量总结PDF文档.py | 对PDF文件进行切割和摘要生成 |
| 批量总结PDF文档pdfminer.py | 对PDF文件进行文本内容的提取和摘要生成 |
| 批量翻译PDF文档_多线程.py | 将指定目录下的PDF文件进行中英文翻译 |
| PDF_Translate.py | 将指定目录下的PDF文件进行中英文翻译 |
| 理解PDF文档内容.py | 对PDF文件进行摘要生成和问题解答 |
| 生成函数注释.py | 自动生成Python函数的注释 |
| 联网的ChatGPT.py | 使用网络爬虫和ChatGPT模型进行聊天回答 |

(文件差异内容过多而无法显示)


@@ -36,15 +36,15 @@
"总结word文档": "SummarizeWordDocument",
"解析ipynb文件": "ParseIpynbFile",
"解析JupyterNotebook": "ParseJupyterNotebook",
"对话历史存档": "ConversationHistoryArchive",
"载入对话历史存档": "LoadConversationHistoryArchive",
"Conversation_To_File": "ConversationHistoryArchive",
"载入Conversation_To_File": "LoadConversationHistoryArchive",
"删除所有本地对话历史记录": "DeleteAllLocalChatHistory",
"Markdown英译中": "MarkdownTranslateFromEngToChi",
"批量Markdown翻译": "BatchTranslateMarkdown",
"Markdown_Translate": "BatchTranslateMarkdown",
"批量总结PDF文档": "BatchSummarizePDFDocuments",
"批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsUsingPDFMiner",
"批量翻译PDF文档": "BatchTranslatePDFDocuments",
"批量翻译PDF文档_多线程": "BatchTranslatePDFDocumentsUsingMultiThreading",
"PDF_Translate": "BatchTranslatePDFDocumentsUsingMultiThreading",
"谷歌检索小助手": "GoogleSearchAssistant",
"理解PDF文档内容标准文件输入": "StandardFileInputForUnderstandingPDFDocumentContent",
"理解PDF文档内容": "UnderstandingPDFDocumentContent",
@@ -1492,7 +1492,7 @@
"交互功能模板函数": "InteractiveFunctionTemplateFunction",
"交互功能函数模板": "InteractiveFunctionFunctionTemplate",
"Latex英文纠错加PDF对比": "LatexEnglishErrorCorrectionWithPDFComparison",
"Latex输出PDF": "LatexOutputPDFResult",
"Latex_Function": "LatexOutputPDFResult",
"Latex翻译中文并重新编译PDF": "TranslateChineseAndRecompilePDF",
"语音助手": "VoiceAssistant",
"微调数据集生成": "FineTuneDatasetGeneration",


@@ -6,17 +6,14 @@
"Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
"下载arxiv论文并翻译摘要": "DownloadArxivPaperAndTranslateAbstract",
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
"批量翻译PDF文档_多线程": "BatchTranslatePDFDocuments_MultiThreaded",
"下载arxiv论文翻译摘要": "DownloadArxivPaperTranslateAbstract",
"解析一个Python项目": "ParsePythonProject",
"解析一个Golang项目": "ParseGolangProject",
"代码重写为全英文_多线程": "RewriteCodeToEnglish_MultiThreaded",
"解析一个CSharp项目": "ParsingCSharpProject",
"删除所有本地对话历史记录": "DeleteAllLocalConversationHistoryRecords",
"批量Markdown翻译": "BatchTranslateMarkdown",
"连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion",
"Langchain知识库": "LangchainKnowledgeBase",
"Latex输出PDF": "OutputPDFFromLatex",
"把字符太少的块清除为回车": "ClearBlocksWithTooFewCharactersToNewline",
"Latex精细分解与转化": "DecomposeAndConvertLatex",
"解析一个C项目的头文件": "ParseCProjectHeaderFiles",
@@ -46,7 +43,7 @@
"高阶功能模板函数": "HighOrderFunctionTemplateFunctions",
"高级功能函数模板": "AdvancedFunctionTemplate",
"总结word文档": "SummarizingWordDocuments",
"载入对话历史存档": "LoadConversationHistoryArchive",
"载入Conversation_To_File": "LoadConversationHistoryArchive",
"Latex中译英": "LatexChineseToEnglish",
"Latex英译中": "LatexEnglishToChinese",
"连接网络回答问题": "ConnectToNetworkToAnswerQuestions",
@@ -70,7 +67,6 @@
"读文章写摘要": "ReadArticleWriteSummary",
"生成函数注释": "GenerateFunctionComments",
"解析项目本身": "ParseProjectItself",
"对话历史存档": "ConversationHistoryArchive",
"专业词汇声明": "ProfessionalTerminologyDeclaration",
"解析docx": "ParseDocx",
"解析源代码新": "ParsingSourceCodeNew",
@@ -104,5 +100,11 @@
"随机小游戏": "RandomMiniGame",
"互动小游戏": "InteractiveMiniGame",
"解析历史输入": "ParseHistoricalInput",
"高阶功能模板函数示意图": "HighOrderFunctionTemplateDiagram"
"高阶功能模板函数示意图": "HighOrderFunctionTemplateDiagram",
"载入对话历史存档": "LoadChatHistoryArchive",
"对话历史存档": "ChatHistoryArchive",
"解析PDF_DOC2X_转Latex": "ParsePDF_DOC2X_toLatex",
"解析PDF_基于DOC2X": "ParsePDF_basedDOC2X",
"解析PDF_简单拆解": "ParsePDF_simpleDecomposition",
"解析PDF_DOC2X_单文件": "ParsePDF_DOC2X_singleFile"
}


@@ -35,15 +35,15 @@
"总结word文档": "SummarizeWordDocument",
"解析ipynb文件": "ParseIpynbFile",
"解析JupyterNotebook": "ParseJupyterNotebook",
"对话历史存档": "ConversationHistoryArchive",
"载入对话历史存档": "LoadConversationHistoryArchive",
"Conversation_To_File": "ConversationHistoryArchive",
"载入Conversation_To_File": "LoadConversationHistoryArchive",
"删除所有本地对话历史记录": "DeleteAllLocalConversationHistoryRecords",
"Markdown英译中": "MarkdownEnglishToChinese",
"批量Markdown翻译": "BatchMarkdownTranslation",
"Markdown_Translate": "BatchMarkdownTranslation",
"批量总结PDF文档": "BatchSummarizePDFDocuments",
"批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsPdfminer",
"批量翻译PDF文档": "BatchTranslatePDFDocuments",
"批量翻译PDF文档_多线程": "BatchTranslatePdfDocumentsMultithreaded",
"PDF_Translate": "BatchTranslatePdfDocumentsMultithreaded",
"谷歌检索小助手": "GoogleSearchAssistant",
"理解PDF文档内容标准文件输入": "StandardFileInputForUnderstandingPdfDocumentContent",
"理解PDF文档内容": "UnderstandingPdfDocumentContent",
@@ -1468,7 +1468,7 @@
"交互功能模板函数": "InteractiveFunctionTemplateFunctions",
"交互功能函数模板": "InteractiveFunctionFunctionTemplates",
"Latex英文纠错加PDF对比": "LatexEnglishCorrectionWithPDFComparison",
"Latex输出PDF": "OutputPDFFromLatex",
"Latex_Function": "OutputPDFFromLatex",
"Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF",
"语音助手": "VoiceAssistant",
"微调数据集生成": "FineTuneDatasetGeneration",

docs/use_tts.md(新增 58 行)

@@ -0,0 +1,58 @@
# 使用TTS文字转语音
## 1. 使用EDGE-TTS(简单)
将本项目配置项修改如下即可
```
TTS_TYPE = "EDGE_TTS"
EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"
```
## 2. 使用SoVITS(需要有显卡)
使用以下`docker-compose.yml`文件,先启动SoVITS服务API。
1. 创建以下文件夹结构
```shell
.
├── docker-compose.yml
└── reference
├── clone_target_txt.txt
└── clone_target_wave.mp3
```
2. 其中`docker-compose.yml`为
```yaml
version: '3.8'
services:
gpt-sovits:
image: fuqingxu/sovits_gptac_trim:latest
container_name: sovits_gptac_container
working_dir: /workspace/gpt_sovits_demo
environment:
- is_half=False
- is_share=False
volumes:
- ./reference:/reference
ports:
- "19880:9880" # 19880 为 sovits api 的暴露端口,记住它
shm_size: 16G
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: "all"
capabilities: [gpu]
command: bash -c "python3 api.py"
```
3. 其中`clone_target_wave.mp3`为需要克隆的角色音频,`clone_target_txt.txt`为该音频对应的文字文本(参考 https://wiki.biligame.com/ys/%E8%A7%92%E8%89%B2%E8%AF%AD%E9%9F%B3 )
4. 运行`docker-compose up`
5. 将本项目配置项修改如下即可
(19880 为 sovits api 的暴露端口,与docker-compose.yml中的端口对应)
```
TTS_TYPE = "LOCAL_SOVITS_API"
GPT_SOVITS_URL = "http://127.0.0.1:19880"
```
6. 启动本项目
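启动前,可以先用一小段Python快速验证SoVITS服务端口是否可达(示意代码,仅做连通性探测;URL取自上文配置,具体API接口以实际服务为准):
```python
import requests  # pip install requests

def sovits_service_alive(url: str = "http://127.0.0.1:19880", timeout: float = 3.0) -> bool:
    """示意:探测本地SoVITS API是否已就绪(只检查HTTP连通性,不调用具体接口)"""
    try:
        requests.get(url, timeout=timeout)  # 端口有HTTP响应(无论状态码)即视为服务已启动
        return True
    except requests.exceptions.RequestException:
        return False

print("SoVITS服务可达" if sovits_service_alive() else "SoVITS服务未就绪,请检查docker-compose是否已启动")
```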

docs/use_vllm.md(新增 46 行)

@@ -0,0 +1,46 @@
# 使用VLLM
## 1. 首先启动 VLLM,自行选择模型
```
python -m vllm.entrypoints.openai.api_server --model /home/hmp/llm/cache/Qwen1___5-32B-Chat --tensor-parallel-size 2 --dtype=half
```
这里使用了存储在 `/home/hmp/llm/cache/Qwen1___5-32B-Chat` 的本地模型,可以根据自己的需求更改。
## 2. 测试 VLLM
```
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "/home/hmp/llm/cache/Qwen1___5-32B-Chat",
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "怎么实现一个去中心化的控制器?"}
]
}'
```
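等价地,也可以用Python发送同一个测试请求(示意代码,参数照搬上面的curl命令;返回体遵循OpenAI兼容格式):
```python
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "/home/hmp/llm/cache/Qwen1___5-32B-Chat",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "怎么实现一个去中心化的控制器?"},
        ],
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])  # OpenAI兼容格式的回复正文
```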
## 3. 配置本项目
```
API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"
LLM_MODEL = "vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=4096)"
API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "http://localhost:8000/v1/chat/completions"}
```
```
"vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=4096)"
其中
"vllm-" 是前缀(必要)
"/home/hmp/llm/cache/Qwen1___5-32B-Chat" 是模型名(必要)
"(max_token=6666)" 是配置(非必要)
```
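下面再用一段Python示意(解析规则为笔者假设,非项目源码)如何从这种模型名里拆出模型路径与可选的max_token配置:
```python
import re

def parse_vllm_model_name(name: str):
    """示意:拆解 "vllm-<模型路径>(max_token=<N>)" 形式的模型名"""
    m = re.fullmatch(r"vllm-(?P<model>.+?)(?:\(max_token=(?P<max_token>\d+)\))?", name)
    if m is None:
        raise ValueError(f"不是合法的vllm模型名: {name}")
    max_token = int(m.group("max_token")) if m.group("max_token") else None
    return m.group("model"), max_token

print(parse_vllm_model_name("vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=4096)"))
# 输出: ('/home/hmp/llm/cache/Qwen1___5-32B-Chat', 4096)
```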
## 4. 启动!
```
python main.py
```

main.py(差异 735 行)

@@ -1,371 +1,364 @@
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
help_menu_description = \
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors).
</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki),
如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交
</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮
</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮
</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端
</br></br>如何保存对话: 点击保存当前的对话按钮
</br></br>如何语音对话: 请阅读Wiki
</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""
def enable_log(PATH_LOGGING):
import logging, uuid
admin_log_path = os.path.join(PATH_LOGGING, "admin")
os.makedirs(admin_log_path, exist_ok=True)
log_dir = os.path.join(admin_log_path, "chat_secrets.log")
try:logging.basicConfig(filename=log_dir, level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
except:logging.basicConfig(filename=log_dir, level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
# Disable logging output from the 'httpx' logger
logging.getLogger("httpx").setLevel(logging.WARNING)
print(f"所有对话记录将自动保存在本地目录{log_dir}, 请注意自我隐私保护哦!")
def main():
import gradio as gr
if gr.__version__ not in ['3.32.9']:
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
from request_llms.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
DARK_MODE, INIT_SYS_PROMPT, ADD_WAIFU = get_conf('DARK_MODE', 'INIT_SYS_PROMPT', 'ADD_WAIFU')
# 如果WEB_PORT是-1, 则随机选取WEB端口
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
from check_proxy import get_current_version
from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2
from themes.theme import js_code_for_css_changing, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
# 对话、日志记录
enable_log(PATH_LOGGING)
# 一些普通功能模块
from core_functional import get_core_functions
functional = get_core_functions()
# 高级函数插件
from crazy_functional import get_crazy_functions
DEFAULT_FN_GROUPS = get_conf('DEFAULT_FN_GROUPS')
plugins = get_crazy_functions()
all_plugin_groups = list(set([g for _, plugin in plugins.items() for g in plugin['Group'].split('|')]))
match_group = lambda tags, groups: any([g in groups for g in tags.split('|')])
# 处理markdown文本格式的转变
gr.Chatbot.postprocess = format_io
# 做一些外观色彩上的调整
set_theme = adjust_theme()
# 代理与自动更新
from check_proxy import check_proxy, auto_update, warm_up_modules
proxy_info = check_proxy(proxies)
gr_L1 = lambda: gr.Row().style()
gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id, min_width=400)
if LAYOUT == "TOP-DOWN":
gr_L1 = lambda: DummyWith()
gr_L2 = lambda scale, elem_id: gr.Row()
CHATBOT_HEIGHT /= 2
cancel_handles = []
customize_btns = {}
predefined_btns = {}
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as app_block:
gr.HTML(title_html)
secret_css, web_cookie_cache = gr.Textbox(visible=False), gr.Textbox(visible=False)
cookies = gr.State(load_chat_cookies())
with gr_L1():
with gr_L2(scale=2, elem_id="gpt-chat"):
chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot")
if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT)
history = gr.State([])
with gr_L2(scale=1, elem_id="gpt-panel"):
with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
with gr.Row():
txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
with gr.Row():
submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
with gr.Row():
resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm")
stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm")
clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
if ENABLE_AUDIO:
with gr.Row():
audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
with gr.Row():
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
with gr.Row():
for k in range(NUM_CUSTOM_BASIC_BTN):
customize_btn = gr.Button("自定义按钮" + str(k+1), visible=False, variant="secondary", info_str=f'基础功能区: 自定义按钮')
customize_btn.style(size="sm")
customize_btns.update({"自定义按钮" + str(k+1): customize_btn})
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
functional[k]["Button"] = gr.Button(k, variant=variant, info_str=f'基础功能区: {k}')
functional[k]["Button"].style(size="sm")
predefined_btns.update({k: functional[k]["Button"]})
with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
with gr.Row():
gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
with gr.Row(elem_id="input-plugin-group"):
plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS,
multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False)
with gr.Row():
for k, plugin in plugins.items():
if not plugin.get("AsButton", True): continue
visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False
variant = plugins[k]["Color"] if "Color" in plugin else "secondary"
info = plugins[k].get("Info", k)
plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant,
visible=visible, info_str=f'函数插件区: {info}').style(size="sm")
with gr.Row():
with gr.Accordion("更多函数插件", open=True):
dropdown_fn_list = []
for k, plugin in plugins.items():
if not match_group(plugin['Group'], DEFAULT_FN_GROUPS): continue
if not plugin.get("AsButton", True): dropdown_fn_list.append(k) # 排除已经是按钮的插件
elif plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
with gr.Row():
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False)
with gr.Row():
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
with gr.Row():
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
with gr.Row():
with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
with gr.Row():
with gr.Tab("上传文件", elem_id="interact-panel"):
gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")
with gr.Tab("更换模型", elem_id="interact-panel"):
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature", elem_id="elem_temperature")
max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",)
system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=INIT_SYS_PROMPT, elem_id="elem_prompt")
temperature.change(None, inputs=[temperature], outputs=None,
_js="""(temperature)=>gpt_academic_gradio_saveload("save", "elem_prompt", "js_temperature_cookie", temperature)""")
system_prompt.change(None, inputs=[system_prompt], outputs=None,
_js="""(system_prompt)=>gpt_academic_gradio_saveload("save", "elem_prompt", "js_system_prompt_cookie", system_prompt)""")
with gr.Tab("界面外观", elem_id="interact-panel"):
theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
opt = ["自定义菜单"]
value=[]
if ADD_WAIFU: opt += ["添加Live2D形象"]; value += ["添加Live2D形象"]
checkboxes_2 = gr.CheckboxGroup(opt, value=value, label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
with gr.Tab("帮助", elem_id="interact-panel"):
gr.Markdown(help_menu_description)
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_input_secondary:
with gr.Accordion("浮动输入区", open=True, elem_id="input-panel2"):
with gr.Row() as row:
row.style(equal_height=True)
with gr.Column(scale=10):
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.",
elem_id='user_input_float', lines=8, label="输入区2").style(container=False)
with gr.Column(scale=1, min_width=40):
submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
clearBtn2 = gr.Button("清除", elem_id="elem_clear2", variant="secondary", visible=False); clearBtn2.style(size="sm")
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
with gr.Accordion("自定义菜单", open=True, elem_id="edit-panel"):
with gr.Row() as row:
with gr.Column(scale=10):
AVAIL_BTN = [btn for btn in customize_btns.keys()] + [k for k in functional]
basic_btn_dropdown = gr.Dropdown(AVAIL_BTN, value="自定义按钮1", label="选择一个需要自定义基础功能区按钮").style(container=False)
basic_fn_title = gr.Textbox(show_label=False, placeholder="输入新按钮名称", lines=1).style(container=False)
basic_fn_prefix = gr.Textbox(show_label=False, placeholder="输入新提示前缀", lines=4).style(container=False)
basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False)
with gr.Column(scale=1, min_width=70):
basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
basic_fn_clean = gr.Button("恢复默认", variant="primary"); basic_fn_clean.style(size="sm")
from shared_utils.cookie_manager import assign_btn__fn_builder
assign_btn = assign_btn__fn_builder(customize_btns, predefined_btns, cookies, web_cookie_cache)
# update btn
h = basic_fn_confirm.click(assign_btn, [web_cookie_cache, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
[web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()])
h.then(None, [web_cookie_cache], None, _js="""(web_cookie_cache)=>{setCookie("web_cookie_cache", web_cookie_cache, 365);}""")
# clean up btn
h2 = basic_fn_clean.click(assign_btn, [web_cookie_cache, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix, gr.State(True)],
[web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()])
h2.then(None, [web_cookie_cache], None, _js="""(web_cookie_cache)=>{setCookie("web_cookie_cache", web_cookie_cache, 365);}""")
# 功能区显示开关与功能区的互动
def fn_area_visibility(a):
ret = {}
ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))})
ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))})
ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
return ret
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, plugin_advanced_arg] )
checkboxes.select(None, [checkboxes], None, _js=js_code_show_or_hide)
# 功能区显示开关与功能区的互动
def fn_area_visibility_2(a):
ret = {}
ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
return ret
checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2)
# 整理反复出现的控件句柄组合
input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
output_combo = [cookies, chatbot, history, status]
predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo)
# 提交按钮、重置按钮
cancel_handles.append(txt.submit(**predict_args))
cancel_handles.append(txt2.submit(**predict_args))
cancel_handles.append(submitBtn.click(**predict_args))
cancel_handles.append(submitBtn2.click(**predict_args))
resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history
resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history
clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
if AUTO_CLEAR_TXT:
submitBtn.click(None, None, [txt, txt2], _js=js_code_clear)
submitBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
txt.submit(None, None, [txt, txt2], _js=js_code_clear)
txt2.submit(None, None, [txt, txt2], _js=js_code_clear)
# 基础功能区的回调函数注册
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
cancel_handles.append(click_handle)
for btn in customize_btns.values():
click_handle = btn.click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(btn.value)], outputs=output_combo)
cancel_handles.append(click_handle)
# 文件上传区,接收文件后与chatbot的互动
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
# 函数插件-固定按钮区
for k in plugins:
if not plugins[k].get("AsButton", True): continue
click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo], output_combo)
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
cancel_handles.append(click_handle)
# 函数插件-下拉菜单与随变按钮的互动
def on_dropdown_changed(k):
variant = plugins[k]["Color"] if "Color" in plugins[k] else "secondary"
info = plugins[k].get("Info", k)
ret = {switchy_bt: gr.update(value=k, variant=variant, info_str=f'函数插件区: {info}')}
if plugins[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区
ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + plugins[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))})
else:
ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
return ret
dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] )
def on_md_dropdown_changed(k):
return {chatbot: gr.update(label="当前模型:"+k)}
md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
def on_theme_dropdown_changed(theme, secret_css):
adjust_theme, css_part1, _, adjust_dynamic_theme = load_dynamic_theme(theme)
if adjust_dynamic_theme:
css_part2 = adjust_dynamic_theme._get_theme_css()
else:
css_part2 = adjust_theme()._get_theme_css()
return css_part2 + css_part1
theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css])
theme_handle.then(
None,
[secret_css],
None,
_js=js_code_for_css_changing
)
# 随变按钮的回调函数注册
def route(request: gr.Request, k, *args, **kwargs):
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs)
click_handle = switchy_bt.click(route,[switchy_bt, *input_combo], output_combo)
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
cancel_handles.append(click_handle)
# 终止按钮的回调函数注册
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
plugins_as_btn = {name:plugin for name, plugin in plugins.items() if plugin.get('Button', None)}
def on_group_change(group_list):
btn_list = []
fns_list = []
if not group_list: # 处理特殊情况:没有选择任何插件组
return [*[plugin['Button'].update(visible=False) for _, plugin in plugins_as_btn.items()], gr.Dropdown.update(choices=[])]
for k, plugin in plugins.items():
if plugin.get("AsButton", True):
btn_list.append(plugin['Button'].update(visible=match_group(plugin['Group'], group_list))) # 刷新按钮
if plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表
return [*btn_list, gr.Dropdown.update(choices=fns_list)]
plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown])
if ENABLE_AUDIO:
from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution
rad = RealtimeAudioDistribution()
def deal_audio(audio, cookies):
rad.feed(cookies['uuid'].hex, audio)
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
app_block.load(assign_user_uuid, inputs=[cookies], outputs=[cookies])
from shared_utils.cookie_manager import load_web_cookie_cache__fn_builder
load_web_cookie_cache = load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)
app_block.load(load_web_cookie_cache, inputs = [web_cookie_cache, cookies],
outputs = [web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)
app_block.load(None, inputs=[], outputs=None, _js=f"""()=>GptAcademicJavaScriptInit("{DARK_MODE}","{INIT_SYS_PROMPT}","{ADD_WAIFU}","{LAYOUT}")""") # 配置暗色主题或亮色主题
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
def run_delayed_tasks():
import threading, webbrowser, time
print(f"如果浏览器没有自动打开,请复制并转到以下URL")
if DARK_MODE: print(f"\t「暗色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")
else: print(f"\t「亮色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")
def auto_updates(): time.sleep(0); auto_update()
def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
def warm_up_mods(): time.sleep(6); warm_up_modules()
threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
# 运行一些异步任务(自动更新、打开浏览器页面、预热tiktoken模块)
run_delayed_tasks()
# 最后,正式开始服务
from shared_utils.fastapi_server import start_app
start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SSL_CERTFILE)
if __name__ == "__main__":
main()
import os, json; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
help_menu_description = \
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors).
</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki),
如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交
</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮
</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮
</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端
</br></br>如何保存对话: 点击保存当前的对话按钮
</br></br>如何语音对话: 请阅读Wiki
</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""
def enable_log(PATH_LOGGING):
import logging
admin_log_path = os.path.join(PATH_LOGGING, "admin")
os.makedirs(admin_log_path, exist_ok=True)
log_dir = os.path.join(admin_log_path, "chat_secrets.log")
try:logging.basicConfig(filename=log_dir, level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
except:logging.basicConfig(filename=log_dir, level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
# Disable logging output from the 'httpx' logger
logging.getLogger("httpx").setLevel(logging.WARNING)
print(f"所有对话记录将自动保存在本地目录{log_dir}, 请注意自我隐私保护哦!")
def encode_plugin_info(k, plugin)->str:
import copy
from themes.theme import to_cookie_str
plugin_ = copy.copy(plugin)
plugin_.pop("Function", None)
plugin_.pop("Class", None)
plugin_.pop("Button", None)
plugin_["Info"] = plugin.get("Info", k)
if plugin.get("AdvancedArgs", False):
plugin_["Label"] = f"插件[{k}]的高级参数说明:" + plugin.get("ArgsReminder", f"没有提供高级参数功能说明")
else:
plugin_["Label"] = f"插件[{k}]不需要高级参数。"
return to_cookie_str(plugin_)
def main():
import gradio as gr
if gr.__version__ not in ['3.32.9', '3.32.10', '3.32.11']:
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
from request_llms.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
# 读取配置
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
DARK_MODE, INIT_SYS_PROMPT, ADD_WAIFU, TTS_TYPE = get_conf('DARK_MODE', 'INIT_SYS_PROMPT', 'ADD_WAIFU', 'TTS_TYPE')
if LLM_MODEL not in AVAIL_LLM_MODELS: AVAIL_LLM_MODELS += [LLM_MODEL]
# 如果WEB_PORT是-1, 则随机选取WEB端口
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
from check_proxy import get_current_version
from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2
from themes.theme import js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
# 对话、日志记录
enable_log(PATH_LOGGING)
# 一些普通功能模块
from core_functional import get_core_functions
functional = get_core_functions()
# 高级函数插件
from crazy_functional import get_crazy_functions
DEFAULT_FN_GROUPS = get_conf('DEFAULT_FN_GROUPS')
plugins = get_crazy_functions()
all_plugin_groups = list(set([g for _, plugin in plugins.items() for g in plugin['Group'].split('|')]))
match_group = lambda tags, groups: any([g in groups for g in tags.split('|')])
# 处理markdown文本格式的转变
gr.Chatbot.postprocess = format_io
# 做一些外观色彩上的调整
set_theme = adjust_theme()
# 代理与自动更新
from check_proxy import check_proxy, auto_update, warm_up_modules
proxy_info = check_proxy(proxies)
# 切换布局
gr_L1 = lambda: gr.Row().style()
gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id, min_width=400)
if LAYOUT == "TOP-DOWN":
gr_L1 = lambda: DummyWith()
gr_L2 = lambda scale, elem_id: gr.Row()
CHATBOT_HEIGHT /= 2
cancel_handles = []
customize_btns = {}
predefined_btns = {}
from shared_utils.cookie_manager import make_cookie_cache, make_history_cache
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as app_block:
gr.HTML(title_html)
secret_css = gr.Textbox(visible=False, elem_id="secret_css")
register_advanced_plugin_init_arr = ""
cookies, web_cookie_cache = make_cookie_cache() # 定义 后端state(cookies)、前端(web_cookie_cache)两兄弟
with gr_L1():
with gr_L2(scale=2, elem_id="gpt-chat"):
chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot")
if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT)
history, history_cache, history_cache_update = make_history_cache() # 定义 后端state(history)、前端(history_cache)、后端setter(history_cache_update)三兄弟
with gr_L2(scale=1, elem_id="gpt-panel"):
with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
with gr.Row():
txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
with gr.Row(elem_id="gpt-submit-row"):
multiplex_submit_btn = gr.Button("提交", elem_id="elem_submit_visible", variant="primary")
multiplex_sel = gr.Dropdown(
choices=[
"常规对话",
"多模型对话",
# "智能上下文",
# "智能召回 RAG",
], value="常规对话",
interactive=True, label='', show_label=False,
elem_classes='normal_mut_select', elem_id="gpt-submit-dropdown").style(container=False)
submit_btn = gr.Button("提交", elem_id="elem_submit", variant="primary", visible=False)
with gr.Row():
resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm")
stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm")
clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
if ENABLE_AUDIO:
with gr.Row():
audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
with gr.Row():
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。支持将文件直接粘贴到输入区。", elem_id="state-panel")
with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
with gr.Row():
for k in range(NUM_CUSTOM_BASIC_BTN):
customize_btn = gr.Button("自定义按钮" + str(k+1), visible=False, variant="secondary", info_str=f'基础功能区: 自定义按钮')
customize_btn.style(size="sm")
customize_btns.update({"自定义按钮" + str(k+1): customize_btn})
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
functional[k]["Button"] = gr.Button(k, variant=variant, info_str=f'基础功能区: {k}')
functional[k]["Button"].style(size="sm")
predefined_btns.update({k: functional[k]["Button"]})
with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
with gr.Row():
gr.Markdown("<small>插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)</small>")
with gr.Row(elem_id="input-plugin-group"):
plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS,
multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False)
with gr.Row():
for index, (k, plugin) in enumerate(plugins.items()):
if not plugin.get("AsButton", True): continue
visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False
variant = plugins[k]["Color"] if "Color" in plugin else "secondary"
info = plugins[k].get("Info", k)
btn_elem_id = f"plugin_btn_{index}"
plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant,
visible=visible, info_str=f'函数插件区: {info}', elem_id=btn_elem_id).style(size="sm")
plugin['ButtonElemId'] = btn_elem_id
with gr.Row():
with gr.Accordion("更多函数插件", open=True):
dropdown_fn_list = []
for k, plugin in plugins.items():
if not match_group(plugin['Group'], DEFAULT_FN_GROUPS): continue
if not plugin.get("AsButton", True): dropdown_fn_list.append(k) # 排除已经是按钮的插件
elif plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
with gr.Row():
dropdown = gr.Dropdown(dropdown_fn_list, value=r"点击这里输入「关键词」搜索插件", label="", show_label=False).style(container=False)
with gr.Row():
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False, elem_id="advance_arg_input_legacy",
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
with gr.Row():
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary", elem_id="elem_switchy_bt").style(size="sm")
with gr.Row():
with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
# 左上角工具栏定义
from themes.gui_toolbar import define_gui_toolbar
checkboxes, checkboxes_2, max_length_sl, theme_dropdown, system_prompt, file_upload_2, md_dropdown, top_p, temperature = \
define_gui_toolbar(AVAIL_LLM_MODELS, LLM_MODEL, INIT_SYS_PROMPT, THEME, AVAIL_THEMES, ADD_WAIFU, help_menu_description, js_code_for_toggle_darkmode)
# 浮动菜单定义
from themes.gui_floating_menu import define_gui_floating_menu
area_input_secondary, txt2, area_customize, _, resetBtn2, clearBtn2, stopBtn2 = \
define_gui_floating_menu(customize_btns, functional, predefined_btns, cookies, web_cookie_cache)
# 插件二级菜单的实现
from themes.gui_advanced_plugin_class import define_gui_advanced_plugin_class
new_plugin_callback, route_switchy_bt_with_arg, usr_confirmed_arg = \
define_gui_advanced_plugin_class(plugins)
# 功能区显示开关与功能区的互动
def fn_area_visibility(a):
ret = {}
ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))})
ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))})
ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
return ret
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, plugin_advanced_arg] )
checkboxes.select(None, [checkboxes], None, _js=js_code_show_or_hide)
# 功能区显示开关与功能区的互动
def fn_area_visibility_2(a):
ret = {}
ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
return ret
checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2)
# 整理反复出现的控件句柄组合
input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
input_combo_order = ["cookies", "max_length_sl", "md_dropdown", "txt", "txt2", "top_p", "temperature", "chatbot", "history", "system_prompt", "plugin_advanced_arg"]
output_combo = [cookies, chatbot, history, status]
predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo)
# 提交按钮、重置按钮
multiplex_submit_btn.click(
None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""")
txt.submit(
None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""")
multiplex_sel.select(
None, [multiplex_sel], None, _js=f"""(multiplex_sel)=>run_multiplex_shift(multiplex_sel)""")
cancel_handles.append(submit_btn.click(**predict_args))
resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
reset_server_side_args = (lambda history: ([], [], "已重置", json.dumps(history)), [history], [chatbot, history, status, history_cache])
resetBtn.click(*reset_server_side_args) # 再在后端清除history,把history转存history_cache备用
resetBtn2.click(*reset_server_side_args) # 再在后端清除history,把history转存history_cache备用
clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
if AUTO_CLEAR_TXT:
submit_btn.click(None, None, [txt, txt2], _js=js_code_clear)
# 基础功能区的回调函数注册
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
cancel_handles.append(click_handle)
for btn in customize_btns.values():
click_handle = btn.click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(btn.value)], outputs=output_combo)
cancel_handles.append(click_handle)
# 文件上传区,接收文件后与chatbot的互动
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
# 函数插件-固定按钮区
for k in plugins:
register_advanced_plugin_init_arr += f"""register_plugin_init("{k}","{encode_plugin_info(k, plugins[k])}");"""
if plugins[k].get("Class", None):
plugins[k]["JsMenu"] = plugins[k]["Class"]().get_js_code_for_generating_menu(k)
register_advanced_plugin_init_arr += """register_advanced_plugin_init_code("{k}","{gui_js}");""".format(k=k, gui_js=plugins[k]["JsMenu"])
if not plugins[k].get("AsButton", True): continue
if plugins[k].get("Class", None) is None:
assert plugins[k].get("Function", None) is not None
click_handle = plugins[k]["Button"].click(None, inputs=[], outputs=None, _js=f"""()=>run_classic_plugin_via_id("{plugins[k]["ButtonElemId"]}")""")
else:
click_handle = plugins[k]["Button"].click(None, inputs=[], outputs=None, _js=f"""()=>run_advanced_plugin_launch_code("{k}")""")
# 函数插件-下拉菜单与随变按钮的互动(新版-更流畅)
dropdown.select(None, [dropdown], None, _js=f"""(dropdown)=>run_dropdown_shift(dropdown)""")
# 模型切换时的回调
def on_md_dropdown_changed(k):
return {chatbot: gr.update(label="当前模型:"+k)}
md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot])
# 主题修改
def on_theme_dropdown_changed(theme, secret_css):
adjust_theme, css_part1, _, adjust_dynamic_theme = load_dynamic_theme(theme)
if adjust_dynamic_theme:
css_part2 = adjust_dynamic_theme._get_theme_css()
else:
css_part2 = adjust_theme()._get_theme_css()
return css_part2 + css_part1
theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css]) # , _js="""change_theme_prepare""")
theme_handle.then(None, [theme_dropdown, secret_css], None, _js="""change_theme""")
switchy_bt.click(None, [switchy_bt], None, _js="(switchy_bt)=>on_flex_button_click(switchy_bt)")
# 随变按钮的回调函数注册
def route(request: gr.Request, k, *args, **kwargs):
if k not in [r"点击这里搜索插件列表", r"请先从插件列表中选择"]:
if plugins[k].get("Class", None) is None:
assert plugins[k].get("Function", None) is not None
yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs)
# 旧插件的高级参数区确认按钮(隐藏)
old_plugin_callback = gr.Button(r"未选定任何插件", variant="secondary", visible=False, elem_id="old_callback_btn_for_plugin_exe")
click_handle_ng = old_plugin_callback.click(route, [switchy_bt, *input_combo], output_combo)
click_handle_ng.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [switchy_bt], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
cancel_handles.append(click_handle_ng)
# 新一代插件的高级参数区确认按钮(隐藏)
click_handle_ng = new_plugin_callback.click(route_switchy_bt_with_arg,
[
gr.State(["new_plugin_callback", "usr_confirmed_arg"] + input_combo_order), # 第一个参数: 指定了后续参数的名称
new_plugin_callback, usr_confirmed_arg, *input_combo # 后续参数: 真正的参数
], output_combo)
click_handle_ng.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [switchy_bt], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
cancel_handles.append(click_handle_ng)
# 终止按钮的回调函数注册
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
plugins_as_btn = {name:plugin for name, plugin in plugins.items() if plugin.get('Button', None)}
def on_group_change(group_list):
btn_list = []
fns_list = []
if not group_list: # 处理特殊情况:没有选择任何插件组
return [*[plugin['Button'].update(visible=False) for _, plugin in plugins_as_btn.items()], gr.Dropdown.update(choices=[])]
for k, plugin in plugins.items():
if plugin.get("AsButton", True):
btn_list.append(plugin['Button'].update(visible=match_group(plugin['Group'], group_list))) # 刷新按钮
if plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表
return [*btn_list, gr.Dropdown.update(choices=fns_list)]
plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown])
# 是否启动语音输入功能
if ENABLE_AUDIO:
from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution
rad = RealtimeAudioDistribution()
def deal_audio(audio, cookies):
rad.feed(cookies['uuid'].hex, audio)
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
# 生成当前浏览器窗口的uuid(刷新失效)
app_block.load(assign_user_uuid, inputs=[cookies], outputs=[cookies])
# 初始化(前端)
from shared_utils.cookie_manager import load_web_cookie_cache__fn_builder
load_web_cookie_cache = load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)
app_block.load(load_web_cookie_cache, inputs = [web_cookie_cache, cookies],
outputs = [web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)
app_block.load(None, inputs=[], outputs=None, _js=f"""()=>GptAcademicJavaScriptInit("{DARK_MODE}","{INIT_SYS_PROMPT}","{ADD_WAIFU}","{LAYOUT}","{TTS_TYPE}")""") # 配置暗色主题或亮色主题
app_block.load(None, inputs=[], outputs=None, _js="""()=>{REP}""".replace("REP", register_advanced_plugin_init_arr))
# Gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
def run_delayed_tasks():
import threading, webbrowser, time
print(f"如果浏览器没有自动打开,请复制并转到以下URL")
if DARK_MODE: print(f"\t「暗色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")
else: print(f"\t「亮色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")
def auto_updates(): time.sleep(0); auto_update()
def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
def warm_up_mods(): time.sleep(6); warm_up_modules()
threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
if get_conf('AUTO_OPEN_BROWSER'):
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
# 运行一些异步任务:自动更新、打开浏览器页面、预热tiktoken模块
run_delayed_tasks()
# 最后,正式开始服务
from shared_utils.fastapi_server import start_app
start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SSL_CERTFILE)
if __name__ == "__main__":
main()


@@ -34,9 +34,14 @@ from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
from .bridge_zhipu import predict as zhipu_ui
from .bridge_taichu import predict_no_ui_long_connection as taichu_noui
from .bridge_taichu import predict as taichu_ui
from .bridge_cohere import predict as cohere_ui
from .bridge_cohere import predict_no_ui_long_connection as cohere_noui
from .oai_std_model_template import get_predict_function
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
class LazyloadTiktoken(object):
@@ -66,8 +71,10 @@ api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
gemini_endpoint = "https://generativelanguage.googleapis.com/v1beta/models"
claude_endpoint = "https://api.anthropic.com/v1/messages"
cohere_endpoint = "https://api.cohere.ai/v1/chat"
ollama_endpoint = "http://localhost:11434/api/chat"
yimodel_endpoint = "https://api.lingyiwanwu.com/v1/chat/completions"
deepseekapi_endpoint = "https://api.deepseek.com/v1/chat/completions"
if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
@@ -85,8 +92,10 @@ if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_e
if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
if gemini_endpoint in API_URL_REDIRECT: gemini_endpoint = API_URL_REDIRECT[gemini_endpoint]
if claude_endpoint in API_URL_REDIRECT: claude_endpoint = API_URL_REDIRECT[claude_endpoint]
if cohere_endpoint in API_URL_REDIRECT: cohere_endpoint = API_URL_REDIRECT[cohere_endpoint]
if ollama_endpoint in API_URL_REDIRECT: ollama_endpoint = API_URL_REDIRECT[ollama_endpoint]
if yimodel_endpoint in API_URL_REDIRECT: yimodel_endpoint = API_URL_REDIRECT[yimodel_endpoint]
if deepseekapi_endpoint in API_URL_REDIRECT: deepseekapi_endpoint = API_URL_REDIRECT[deepseekapi_endpoint]
# 获取tokenizer
tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
@@ -110,6 +119,15 @@ model_info = {
"token_cnt": get_token_num_gpt35,
},
"taichu": {
"fn_with_ui": taichu_ui,
"fn_without_ui": taichu_noui,
"endpoint": openai_endpoint,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gpt-3.5-turbo-16k": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -173,6 +191,36 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
"gpt-4o": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"has_multimodal_capacity": True,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-4o-mini": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"has_multimodal_capacity": True,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-4o-2024-05-13": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"has_multimodal_capacity": True,
"endpoint": openai_endpoint,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-4-turbo-preview": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -200,6 +248,26 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
"gpt-4-turbo": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"has_multimodal_capacity": True,
"endpoint": openai_endpoint,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-4-turbo-2024-04-09": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"has_multimodal_capacity": True,
"endpoint": openai_endpoint,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-3.5-random": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -247,6 +315,46 @@ model_info = {
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"glm-4-0520": {
"fn_with_ui": zhipu_ui,
"fn_without_ui": zhipu_noui,
"endpoint": None,
"max_token": 10124 * 8,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"glm-4-air": {
"fn_with_ui": zhipu_ui,
"fn_without_ui": zhipu_noui,
"endpoint": None,
"max_token": 10124 * 8,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"glm-4-airx": {
"fn_with_ui": zhipu_ui,
"fn_without_ui": zhipu_noui,
"endpoint": None,
"max_token": 10124 * 8,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"glm-4-flash": {
"fn_with_ui": zhipu_ui,
"fn_without_ui": zhipu_noui,
"endpoint": None,
"max_token": 10124 * 8,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"glm-4v": {
"fn_with_ui": zhipu_ui,
"fn_without_ui": zhipu_noui,
"endpoint": None,
"max_token": 1000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"glm-3-turbo": {
"fn_with_ui": zhipu_ui,
"fn_without_ui": zhipu_noui,
@@ -376,7 +484,7 @@ for model in AVAIL_LLM_MODELS:
# -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=-
# claude家族
claude_models = ["claude-instant-1.2","claude-2.0","claude-2.1","claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229","claude-3-5-sonnet-20240620"]
if any(item in claude_models for item in AVAIL_LLM_MODELS):
from .bridge_claude import predict_no_ui_long_connection as claude_noui
from .bridge_claude import predict as claude_ui
@@ -440,6 +548,16 @@ if any(item in claude_models for item in AVAIL_LLM_MODELS):
"token_cnt": get_token_num_gpt35,
},
})
model_info.update({
"claude-3-5-sonnet-20240620": {
"fn_with_ui": claude_ui,
"fn_without_ui": claude_noui,
"endpoint": claude_endpoint,
"max_token": 200000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
if "jittorllms_rwkv" in AVAIL_LLM_MODELS:
from .bridge_jittorllms_rwkv import predict_no_ui_long_connection as rwkv_noui
from .bridge_jittorllms_rwkv import predict as rwkv_ui
@@ -625,14 +743,22 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
except:
print(trimmed_format_exc())
# -=-=-=-=-=-=- 零一万物模型 -=-=-=-=-=-=-
if "yi-34b-chat-0205" in AVAIL_LLM_MODELS or "yi-34b-chat-200k" in AVAIL_LLM_MODELS: # zhipuai
yi_models = ["yi-34b-chat-0205","yi-34b-chat-200k","yi-large","yi-medium","yi-spark","yi-large-turbo","yi-large-preview"]
if any(item in yi_models for item in AVAIL_LLM_MODELS):
try:
from .bridge_yimodel import predict_no_ui_long_connection as yimodel_noui
from .bridge_yimodel import predict as yimodel_ui
yimodel_4k_noui, yimodel_4k_ui = get_predict_function(
api_key_conf_name="YIMODEL_API_KEY", max_output_token=600, disable_proxy=False
)
yimodel_16k_noui, yimodel_16k_ui = get_predict_function(
api_key_conf_name="YIMODEL_API_KEY", max_output_token=4000, disable_proxy=False
)
yimodel_200k_noui, yimodel_200k_ui = get_predict_function(
api_key_conf_name="YIMODEL_API_KEY", max_output_token=4096, disable_proxy=False
)
model_info.update({
"yi-34b-chat-0205": {
"fn_with_ui": yimodel_ui,
"fn_without_ui": yimodel_noui,
"fn_with_ui": yimodel_4k_ui,
"fn_without_ui": yimodel_4k_noui,
"can_multi_thread": False, # 目前来说,默认情况下并发量极低,因此禁用
"endpoint": yimodel_endpoint,
"max_token": 4000,
@@ -640,14 +766,59 @@ if "yi-34b-chat-0205" in AVAIL_LLM_MODELS or "yi-34b-chat-200k" in AVAIL_LLM_MOD
"token_cnt": get_token_num_gpt35,
},
"yi-34b-chat-200k": {
"fn_with_ui": yimodel_ui,
"fn_without_ui": yimodel_noui,
"fn_with_ui": yimodel_200k_ui,
"fn_without_ui": yimodel_200k_noui,
"can_multi_thread": False, # 目前来说,默认情况下并发量极低,因此禁用
"endpoint": yimodel_endpoint,
"max_token": 200000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"yi-large": {
"fn_with_ui": yimodel_16k_ui,
"fn_without_ui": yimodel_16k_noui,
"can_multi_thread": False, # 目前来说,默认情况下并发量极低,因此禁用
"endpoint": yimodel_endpoint,
"max_token": 16000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"yi-medium": {
"fn_with_ui": yimodel_16k_ui,
"fn_without_ui": yimodel_16k_noui,
"can_multi_thread": True, # 这个并发量稍微大一点
"endpoint": yimodel_endpoint,
"max_token": 16000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"yi-spark": {
"fn_with_ui": yimodel_16k_ui,
"fn_without_ui": yimodel_16k_noui,
"can_multi_thread": True, # 这个并发量稍微大一点
"endpoint": yimodel_endpoint,
"max_token": 16000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"yi-large-turbo": {
"fn_with_ui": yimodel_16k_ui,
"fn_without_ui": yimodel_16k_noui,
"can_multi_thread": False, # 目前来说,默认情况下并发量极低,因此禁用
"endpoint": yimodel_endpoint,
"max_token": 16000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"yi-large-preview": {
"fn_with_ui": yimodel_16k_ui,
"fn_without_ui": yimodel_16k_noui,
"can_multi_thread": False, # 目前来说,默认情况下并发量极低,因此禁用
"endpoint": yimodel_endpoint,
"max_token": 16000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
except:
print(trimmed_format_exc())
@@ -708,6 +879,15 @@ if "sparkv3" in AVAIL_LLM_MODELS or "sparkv3.5" in AVAIL_LLM_MODELS: # 讯飞
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"sparkv4":{
"fn_with_ui": spark_ui,
"fn_without_ui": spark_noui,
"can_multi_thread": True,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
@@ -760,8 +940,34 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
})
except:
print(trimmed_format_exc())
# -=-=-=-=-=-=- 幻方-深度求索大模型在线API -=-=-=-=-=-=-
if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS:
try:
deepseekapi_noui, deepseekapi_ui = get_predict_function(
api_key_conf_name="DEEPSEEK_API_KEY", max_output_token=4096, disable_proxy=False
)
model_info.update({
"deepseek-chat":{
"fn_with_ui": deepseekapi_ui,
"fn_without_ui": deepseekapi_noui,
"endpoint": deepseekapi_endpoint,
"can_multi_thread": True,
"max_token": 32000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"deepseek-coder":{
"fn_with_ui": deepseekapi_ui,
"fn_without_ui": deepseekapi_noui,
"endpoint": deepseekapi_endpoint,
"can_multi_thread": True,
"max_token": 16000,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
except:
print(trimmed_format_exc())
# -=-=-=-=-=-=- one-api 对齐支持 -=-=-=-=-=-=-
for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
# 为了更灵活地接入one-api多模型管理界面,设计了此接口,例子AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"]
@@ -770,21 +976,80 @@ for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
# "mixtral-8x7b" 是模型名(必要)
# "(max_token=6666)" 是配置(非必要)
try:
origin_model_name, max_token_tmp = read_one_api_model_name(model)
# 如果是已知模型,则尝试获取其信息
original_model_info = model_info.get(origin_model_name.replace("one-api-", "", 1), None)
except:
print(f"one-api模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
continue
this_model_info = {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"can_multi_thread": True,
"endpoint": openai_endpoint,
"max_token": max_token_tmp,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
# 同步已知模型的其他信息
attribute = "has_multimodal_capacity"
if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
# attribute = "attribute2"
# if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
# attribute = "attribute3"
# if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
model_info.update({model: this_model_info})
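The "(max_token=...)" suffix parsing assumed above can be sketched as follows (illustration only; the real read_one_api_model_name is imported from toolbox, and the 4096 fallback is an assumption):
import re
def parse_max_token_suffix(model: str):
    # "one-api-mixtral-8x7b(max_token=6666)" -> ("one-api-mixtral-8x7b", 6666)
    match = re.search(r"\(max_token=(\d+)\)$", model)
    if match is None:
        return model, 4096  # assumed default when no "(max_token=...)" suffix is given
    return model[: match.start()], int(match.group(1))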
# -=-=-=-=-=-=- vllm 对齐支持 -=-=-=-=-=-=-
for model in [m for m in AVAIL_LLM_MODELS if m.startswith("vllm-")]:
# 为了更灵活地接入vllm多模型管理界面,设计了此接口,例子AVAIL_LLM_MODELS = ["vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=6666)"]
# 其中
# "vllm-" 是前缀(必要)
# "mixtral-8x7b" 是模型名(必要)
# "(max_token=6666)" 是配置(非必要)
try:
_, max_token_tmp = read_one_api_model_name(model)
except:
print(f"vllm模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
continue
model_info.update({
model: {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"can_multi_thread": True,
"endpoint": openai_endpoint,
"max_token": max_token_tmp,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
# -=-=-=-=-=-=- ollama 对齐支持 -=-=-=-=-=-=-
for model in [m for m in AVAIL_LLM_MODELS if m.startswith("ollama-")]:
from .bridge_ollama import predict_no_ui_long_connection as ollama_noui
from .bridge_ollama import predict as ollama_ui
break
for model in [m for m in AVAIL_LLM_MODELS if m.startswith("ollama-")]:
# 为了更灵活地接入ollama多模型管理界面,设计了此接口,例子AVAIL_LLM_MODELS = ["ollama-phi3(max_token=6666)"]
# 其中
# "ollama-" 是前缀(必要)
# "phi3" 是模型名(必要)
# "(max_token=6666)" 是配置(非必要)
try:
_, max_token_tmp = read_one_api_model_name(model)
except:
print(f"ollama模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
continue
model_info.update({
model: {
"fn_with_ui": ollama_ui,
"fn_without_ui": ollama_noui,
"endpoint": ollama_endpoint,
"max_token": max_token_tmp,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
})
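Putting the three prefix conventions together, a config.py entry might look like this (values illustrative):
AVAIL_LLM_MODELS = [
    "gpt-4o",                                                       # plain OpenAI model
    "one-api-mixtral-8x7b(max_token=6666)",                         # via a one-api gateway
    "vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=6666)",  # local vllm server
    "ollama-phi3(max_token=6666)",                                  # local ollama server
]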
# -=-=-=-=-=-=- azure模型对齐支持 -=-=-=-=-=-=-
AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY") # <-- 用于定义和切换多个azure模型 -->
@@ -810,6 +1075,13 @@ if len(AZURE_CFG_ARRAY) > 0:
AVAIL_LLM_MODELS += [azure_model_name]
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
# -=-=-=-=-=-=-=-=-=- ☝️ 以上是模型路由 -=-=-=-=-=-=-=-=-=
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
# -=-=-=-=-=-=-= 👇 以下是多模型路由切换函数 -=-=-=-=-=-=-=
# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
def LLM_CATCH_EXCEPTION(f):
@@ -846,13 +1118,11 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
model = llm_kwargs['llm_model']
n_model = 1
if '&' not in model:
# 如果只询问“一个”大语言模型(多数情况):
method = model_info[model]["fn_without_ui"]
return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
else:
# 如果同时询问“多个”大语言模型,这个稍微啰嗦一点,但思路相同,您不必读这个else分支
executor = ThreadPoolExecutor(max_workers=4)
models = model.split('&')
n_model = len(models)
@@ -905,8 +1175,26 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
return res
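A usage sketch of the "&" fan-out above (all llm_kwargs values illustrative): each model answers the same prompt in its own worker thread, and the replies are joined with a horizontal rule.
llm_kwargs = {"llm_model": "gpt-3.5-turbo&claude-3-opus-20240229",
              "api_key": "sk-...", "temperature": 1.0, "top_p": 1.0, "max_length": 4096}
reply = predict_no_ui_long_connection("Hello", llm_kwargs, history=[], sys_prompt="", observe_window=[])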
# 根据基础功能区 ModelOverride 参数调整模型类型,用于 `predict` 中
import importlib
import core_functional
def execute_model_override(llm_kwargs, additional_fn, method):
functional = core_functional.get_core_functions()
if (additional_fn in functional) and 'ModelOverride' in functional[additional_fn]:
# 热更新Prompt & ModelOverride
importlib.reload(core_functional)
functional = core_functional.get_core_functions()
model_override = functional[additional_fn]['ModelOverride']
if model_override not in model_info:
raise ValueError(f"模型覆盖参数 '{model_override}' 指向一个暂不支持的模型,请检查配置文件。")
method = model_info[model_override]["fn_with_ui"]
llm_kwargs['llm_model'] = model_override
return llm_kwargs, additional_fn, method
# 默认返回原参数
return llm_kwargs, additional_fn, method
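# Hypothetical core_functional.py entry that would trigger the override above
# (keys follow the lookups in execute_model_override; the button name and the
#  target model are illustrative):
"学术润色": {
    "Prefix": "Please polish the following paragraph:\n\n",
    "Suffix": "",
    "ModelOverride": "gpt-4o",  # route this preset to gpt-4o regardless of the dropdown selection
},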
def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot,
history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
"""
发送至LLM,流式获取输出。
用于基础的对话功能。
@@ -925,6 +1213,11 @@ def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
"""
inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错,检查config中的AVAIL_LLM_MODELS选项
if additional_fn: # 根据基础功能区 ModelOverride 参数调整模型类型
llm_kwargs, additional_fn, method = execute_model_override(llm_kwargs, additional_fn, method)
yield from method(inputs, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, stream, additional_fn)


@@ -6,7 +6,6 @@ from toolbox import get_conf, ProxyNetworkActivate
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
@@ -23,20 +22,45 @@ class GetGLM3Handle(LocalLLMHandle):
import os
import platform
LOCAL_MODEL_QUANT, device = get_conf("LOCAL_MODEL_QUANT", "LOCAL_MODEL_DEVICE")
_model_name_ = "THUDM/chatglm3-6b"
# if LOCAL_MODEL_QUANT == "INT4": # INT4
# _model_name_ = "THUDM/chatglm3-6b-int4"
# elif LOCAL_MODEL_QUANT == "INT8": # INT8
# _model_name_ = "THUDM/chatglm3-6b-int8"
# else:
# _model_name_ = "THUDM/chatglm3-6b" # FP16
with ProxyNetworkActivate("Download_LLM"):
chatglm_tokenizer = AutoTokenizer.from_pretrained(
_model_name_, trust_remote_code=True
)
if device == "cpu":
chatglm_model = AutoModel.from_pretrained(
_model_name_,
trust_remote_code=True,
device="cpu",
).float()
elif LOCAL_MODEL_QUANT == "INT4": # INT4
chatglm_model = AutoModel.from_pretrained(
pretrained_model_name_or_path=_model_name_,
trust_remote_code=True,
device="cuda",
load_in_4bit=True,
)
elif LOCAL_MODEL_QUANT == "INT8": # INT8
chatglm_model = AutoModel.from_pretrained(
pretrained_model_name_or_path=_model_name_,
trust_remote_code=True,
device="cuda",
load_in_8bit=True,
)
else:
chatglm_model = AutoModel.from_pretrained(
pretrained_model_name_or_path=_model_name_,
trust_remote_code=True,
device="cuda",
)
chatglm_model = chatglm_model.eval()
self._model = chatglm_model
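# Illustrative config.py values matching the branches above (assumption:
# LOCAL_MODEL_QUANT is one of "FP16" / "INT4" / "INT8"; note that the 'cpu'
# branch always takes the full-precision .float() path, ignoring quantization):
#   LOCAL_MODEL_QUANT = "INT4"
#   LOCAL_MODEL_DEVICE = "cuda"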
@@ -46,32 +70,36 @@ class GetGLM3Handle(LocalLLMHandle):
def llm_stream_generator(self, **kwargs):
# 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
def adaptor(kwargs):
query = kwargs["query"]
max_length = kwargs["max_length"]
top_p = kwargs["top_p"]
temperature = kwargs["temperature"]
history = kwargs["history"]
return query, max_length, top_p, temperature, history
query, max_length, top_p, temperature, history = adaptor(kwargs)
for response, history in self._model.stream_chat(
self._tokenizer,
query,
history,
max_length=max_length,
top_p=top_p,
temperature=temperature,
):
yield response
def try_to_import_special_deps(self, **kwargs):
# import something that will raise error if the user does not install requirement_*.txt
# 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行
import importlib
# importlib.import_module('modelscope')
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(
GetGLM3Handle, model_name, history_format="chatglm3"
)


@@ -1,5 +1,3 @@
# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目
"""
该文件中主要包含三个函数
@@ -11,19 +9,19 @@
"""
import json
import os
import re
import time
import gradio as gr
import logging
import traceback
import requests
import importlib
import random
# config_private.py放自己的秘密如API和代理网址
# 读取时首先看是否存在私密的config_private配置文件不受git管控,如果有,则覆盖原config文件
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history
from toolbox import trimmed_format_exc, is_the_upload_folder, read_one_api_model_name, log_chat
from toolbox import ChatBotWithCookies, have_any_recent_upload_image_files, encode_image
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')
@@ -41,6 +39,57 @@ def get_full_error(chunk, stream_response):
break
return chunk
def make_multimodal_input(inputs, image_paths):
image_base64_array = []
for image_path in image_paths:
path = os.path.abspath(image_path)
base64 = encode_image(path)
inputs = inputs + f'<br/><br/><div align="center"><img src="file={path}" base64="{base64}"></div>'
image_base64_array.append(base64)
return inputs, image_base64_array
def reverse_base64_from_input(inputs):
# 定义一个正则表达式来匹配 Base64 字符串(假设格式为 base64="<Base64编码>")
# pattern = re.compile(r'base64="([^"]+)"></div>')
pattern = re.compile(r'<br/><br/><div align="center"><img[^<>]+base64="([^"]+)"></div>')
# 使用 findall 方法查找所有匹配的 Base64 字符串
base64_strings = pattern.findall(inputs)
# 返回反转后的 Base64 字符串列表
return base64_strings
def contain_base64(inputs):
base64_strings = reverse_base64_from_input(inputs)
return len(base64_strings) > 0
def append_image_if_contain_base64(inputs):
if not contain_base64(inputs):
return inputs
else:
image_base64_array = reverse_base64_from_input(inputs)
pattern = re.compile(r'<br/><br/><div align="center"><img[^><]+></div>')
inputs = re.sub(pattern, '', inputs)
res = []
res.append({
"type": "text",
"text": inputs
})
for image_base64 in image_base64_array:
res.append({
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{image_base64}"
}
})
return res
def remove_image_if_contain_base64(inputs):
if not contain_base64(inputs):
return inputs
else:
pattern = re.compile(r'<br/><br/><div align="center"><img[^><]+></div>')
inputs = re.sub(pattern, '', inputs)
return inputs
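A round-trip sketch of the base64 helpers above (illustrative; it assumes a local ./cat.jpg exists so that encode_image can read it):
inputs, imgs = make_multimodal_input("describe this image", ["./cat.jpg"])
assert contain_base64(inputs)                                 # an <img ... base64="..."> div was appended
assert reverse_base64_from_input(inputs) == imgs              # extraction recovers the same payloads
assert remove_image_if_contain_base64(inputs) == "describe this image"  # the html is stripped again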
def decode_chunk(chunk):
# 提前读取一些信息 (用于判断异常)
chunk_decoded = chunk.decode()
@@ -159,6 +208,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
chatbot 为WebUI中显示的对话列表,修改它,然后yield出去,可以直接修改对话界面内容
additional_fn代表点击的哪个按钮,按钮见functional.py
"""
from .bridge_all import model_info
if is_any_api_key(inputs):
chatbot._cookies['api_key'] = inputs
chatbot.append(("输入已识别为openai的api_key", what_keys(inputs)))
@@ -174,9 +224,17 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
raw_input = inputs
# logging.info(f'[raw_input] {raw_input}')
chatbot.append((inputs, ""))
# 多模态模型
has_multimodal_capacity = model_info[llm_kwargs['llm_model']].get('has_multimodal_capacity', False)
if has_multimodal_capacity:
has_recent_image_upload, image_paths = have_any_recent_upload_image_files(chatbot, pop=True)
else:
has_recent_image_upload, image_paths = False, []
if has_recent_image_upload:
_inputs, image_base64_array = make_multimodal_input(inputs, image_paths)
else:
_inputs, image_base64_array = inputs, []
chatbot.append((_inputs, ""))
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
# check mis-behavior
@@ -186,7 +244,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
time.sleep(2)
try:
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, image_base64_array, has_multimodal_capacity, stream)
except RuntimeError as e:
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
@@ -194,7 +252,6 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
# 检查endpoint是否合法
try:
endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint'])
except:
tb_str = '```\n' + trimmed_format_exc() + '```'
@@ -202,7 +259,11 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
yield from update_ui(chatbot=chatbot, history=history, msg="Endpoint不满足要求") # 刷新界面
return
history.append(inputs); history.append("")
# 加入历史
if has_recent_image_upload:
history.extend([_inputs, ""])
else:
history.extend([inputs, ""])
retry = 0
while True:
@@ -316,14 +377,17 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
return chatbot, history
def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:str, image_base64_array:list=[], has_multimodal_capacity:bool=False, stream:bool=True):
"""
整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
"""
if not is_any_api_key(llm_kwargs['api_key']):
raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案在config.py中配置。")
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
if llm_kwargs['llm_model'].startswith('vllm-'):
api_key = 'no-api-key'
else:
api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
headers = {
"Content-Type": "application/json",
@@ -336,36 +400,83 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
azure_api_key_unshared = AZURE_CFG_ARRAY[llm_kwargs['llm_model']]["AZURE_API_KEY"]
headers.update({"api-key": azure_api_key_unshared})
if has_multimodal_capacity:
# 当以下条件满足时,启用多模态能力:
# 1. 模型本身是多模态模型(has_multimodal_capacity)
# 2. 输入包含图像(len(image_base64_array) > 0)
# 3. 历史输入包含图像(any([contain_base64(h) for h in history]))
enable_multimodal_capacity = (len(image_base64_array) > 0) or any([contain_base64(h) for h in history])
else:
enable_multimodal_capacity = False
if not enable_multimodal_capacity:
# 不使用多模态能力
conversation_cnt = len(history) // 2
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = remove_image_if_contain_base64(history[index])
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = remove_image_if_contain_base64(history[index+1])
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "": continue
if what_gpt_answer["content"] == timeout_bot_msg: continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
else:
# 多模态能力
conversation_cnt = len(history) // 2
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = append_image_if_contain_base64(history[index])
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = append_image_if_contain_base64(history[index+1])
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "": continue
if what_gpt_answer["content"] == timeout_bot_msg: continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = []
what_i_ask_now["content"].append({
"type": "text",
"text": inputs
})
for image_base64 in image_base64_array:
what_i_ask_now["content"].append({
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{image_base64}"
}
})
messages.append(what_i_ask_now)
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2*conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = history[index+1]
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "": continue
if what_gpt_answer["content"] == timeout_bot_msg: continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
model = llm_kwargs['llm_model']
if llm_kwargs['llm_model'].startswith('api2d-'):
model = llm_kwargs['llm_model'][len('api2d-'):]
if llm_kwargs['llm_model'].startswith('one-api-'):
model = llm_kwargs['llm_model'][len('one-api-'):]
model, _ = read_one_api_model_name(model)
if llm_kwargs['llm_model'].startswith('vllm-'):
model = llm_kwargs['llm_model'][len('vllm-'):]
model, _ = read_one_api_model_name(model)
if model == "gpt-3.5-random": # 随机选择, 绕过openai访问频率限制
model = random.choice([
"gpt-3.5-turbo",
@@ -384,13 +495,11 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"top_p": llm_kwargs['top_p'], # 1.0,
"n": 1,
"stream": stream,
"presence_penalty": 0,
"frequency_penalty": 0,
}
# try:
# print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
# except:
# print('输入中可能存在乱码。')
return headers,payload
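For reference, the multimodal branch above emits user messages in the OpenAI vision format, e.g. (payload illustrative):
example_user_message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "describe this image"},
        {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,/9j/4AAQ..."}},
    ],
}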


@@ -27,10 +27,8 @@ timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check
def report_invalid_key(key):
if get_conf("BLOCK_INVALID_APIKEY"):
# 实验性功能,自动检测并屏蔽失效的KEY,请勿使用
# 弃用功能
return
def get_full_error(chunk, stream_response):
"""


@@ -17,7 +17,7 @@ import json
import requests
from toolbox import get_conf, update_ui, trimmed_format_exc, encode_image, every_image_file_in_path, log_chat
picture_system_prompt = "\n当回复图像时,必须说明正在回复哪张图像。所有图像仅在最后一个问题中提供,即使它们在历史记录中被提及。请使用'这是第X张图像:'的格式来指明您正在描述的是哪张图像。"
Claude_3_Models = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"]
Claude_3_Models = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229", "claude-3-5-sonnet-20240620"]
# config_private.py放自己的秘密如API和代理网址
# 读取时首先看是否存在私密的config_private配置文件不受git管控,如果有,则覆盖原config文件


@@ -8,7 +8,7 @@ import os
import time
from request_llms.com_google import GoogleChatInit
from toolbox import ChatBotWithCookies
from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc, log_chat
proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
@@ -99,6 +99,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
gpt_replying_buffer += paraphrase['text'] # 使用 json 解析库进行处理
chatbot[-1] = (inputs, gpt_replying_buffer)
history[-1] = gpt_replying_buffer
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
yield from update_ui(chatbot=chatbot, history=history)
if error_match:
history = history[:-2] # 错误的不纳入对话


@@ -22,8 +22,9 @@ import random
# config_private.py放自己的秘密如API和代理网址
# 读取时首先看是否存在私密的config_private配置文件不受git管控,如果有,则覆盖原config文件
from toolbox import get_conf, update_ui, trimmed_format_exc, is_the_upload_folder, read_one_api_model_name
proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf(
"proxies", "TIMEOUT_SECONDS", "MAX_RETRY"
)
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
@@ -45,8 +46,8 @@ def decode_chunk(chunk):
chunkjson = None
is_last_chunk = False
try:
chunkjson = json.loads(chunk_decoded)
is_last_chunk = chunkjson.get("done", False)
except:
pass
return chunk_decoded, chunkjson, is_last_chunk
@@ -84,7 +85,6 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
stream_response = response.iter_lines()
result = ''
while True:
try: chunk = next(stream_response)
except StopIteration:
@@ -92,21 +92,18 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
except requests.exceptions.ConnectionError:
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
chunk_decoded, chunkjson, is_last_chunk = decode_chunk(chunk)
if chunk:
try:
if is_last_chunk:
# 判定为数据流的结束,gpt_replying_buffer也写完了
logging.info(f'[response] {result}')
break
result += chunkjson['message']["content"]
if not console_slience: print(chunkjson['message']["content"], end='')
if observe_window is not None:
# 观测窗,把已经获取的数据显示出去
if len(observe_window) >= 1:
observe_window[0] += chunkjson['message']["content"]
# 看门狗,如果超过期限没有喂狗,则终止
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
@@ -130,8 +127,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
chatbot 为WebUI中显示的对话列表,修改它,然后yield出去,可以直接修改对话界面内容
additional_fn代表点击的哪个按钮,按钮见functional.py
"""
if inputs == "": inputs = "空空如也的输入栏"
user_input = inputs
if additional_fn is not None:
@@ -171,7 +166,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
gpt_replying_buffer = ""
if stream:
stream_response = response.iter_lines()
while True:
@@ -185,10 +179,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
# 提前读取一些信息 (用于判断异常)
chunk_decoded, chunkjson, is_last_chunk = decode_chunk(chunk)
if chunk:
try:
if is_last_chunk:
@@ -196,8 +186,11 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
logging.info(f'[response] {gpt_replying_buffer}')
break
# 处理数据流的主体
status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
try:
status_text = f"finish_reason: {chunkjson['error'].get('message', 'null')}"
except:
status_text = "finish_reason: null"
gpt_replying_buffer = gpt_replying_buffer + chunkjson['message']["content"]
# 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出
history[-1] = gpt_replying_buffer
chatbot[-1] = (history[-2], history[-1])
@@ -234,11 +227,9 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
"""
整合所有信息选择LLM模型生成http请求为发送请求做准备
"""
api_key = f"Bearer {YIMODEL_API_KEY}"
headers = {
"Content-Type": "application/json",
"Authorization": api_key
}
conversation_cnt = len(history) // 2
@@ -265,19 +256,17 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
model = llm_kwargs['llm_model']
if llm_kwargs['llm_model'].startswith('ollama-'):
model = llm_kwargs['llm_model'][len('ollama-'):]
model, _ = read_one_api_model_name(model)
options = {"temperature": llm_kwargs['temperature']}
payload = {
"model": model,
"messages": messages,
"temperature": llm_kwargs['temperature'], # 1.0,
"stream": stream,
"max_tokens": tokens
"options": options,
}
try:
print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
except:
print('输入中可能存在乱码。')
return headers,payload


@@ -82,6 +82,9 @@ def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
"ERNIE-Bot": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions",
"ERNIE-Bot-turbo": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant",
"BLOOMZ-7B": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/bloomz_7b1",
"ERNIE-Speed-128K": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-speed-128k",
"ERNIE-Speed-8K": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie_speed",
"ERNIE-Lite-8K": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-8k",
"Llama-2-70B-Chat": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/llama_2_70b",
"Llama-2-13B-Chat": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/llama_2_13b",
@@ -165,4 +168,4 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
tb_str = '```\n' + trimmed_format_exc() + '```'
chatbot[-1] = (chatbot[-1][0], tb_str)
yield from update_ui(chatbot=chatbot, history=history, msg="异常") # 刷新界面
return


@@ -1,7 +1,7 @@
import time
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import check_packages, report_exception, log_chat
model_name = 'Qwen'
@@ -59,6 +59,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
# 总结输出
if response == f"[Local Message] 等待{model_name}响应中 ...":
response = f"[Local Message] {model_name}响应异常 ..."


@@ -1,69 +1,69 @@
import time
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import check_packages, report_exception
model_name = '云雀大模型'
def validate_key():
YUNQUE_SECRET_KEY = get_conf("YUNQUE_SECRET_KEY")
if YUNQUE_SECRET_KEY == '': return False
return True
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
"""
⭐ 多线程方法
函数的说明请见 request_llms/bridge_all.py
"""
watch_dog_patience = 5
response = ""
if validate_key() is False:
raise RuntimeError('请配置YUNQUE_SECRET_KEY')
from .com_skylark2api import YUNQUERequestInstance
sri = YUNQUERequestInstance()
for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
if len(observe_window) >= 1:
observe_window[0] = response
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
return response
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
⭐ 单线程方法
函数的说明请见 request_llms/bridge_all.py
"""
chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history)
# 尝试导入依赖,如果缺少依赖,则给出安装建议
try:
check_packages(["zhipuai"])
except:
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
chatbot=chatbot, history=history, delay=0)
return
if validate_key() is False:
yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置HUOSHAN_API_KEY", chatbot=chatbot, history=history, delay=0)
return
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
# 开始接收回复
from .com_skylark2api import YUNQUERequestInstance
sri = YUNQUERequestInstance()
response = f"[Local Message] 等待{model_name}响应中 ..."
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
# 总结输出
if response == f"[Local Message] 等待{model_name}响应中 ...":
response = f"[Local Message] {model_name}响应异常 ..."
history.extend([inputs, response])
yield from update_ui(chatbot=chatbot, history=history)


@@ -0,0 +1,72 @@
import time
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg, log_chat
from toolbox import check_packages, report_exception, have_any_recent_upload_image_files
from toolbox import ChatBotWithCookies
# model_name = 'Taichu-2.0'
# taichu_default_model = 'taichu_llm'
def validate_key():
TAICHU_API_KEY = get_conf("TAICHU_API_KEY")
if TAICHU_API_KEY == '': return False
return True
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
"""
⭐多线程方法
函数的说明请见 request_llms/bridge_all.py
"""
watch_dog_patience = 5
response = ""
# if llm_kwargs["llm_model"] == "taichu":
# llm_kwargs["llm_model"] = "taichu"
if validate_key() is False:
raise RuntimeError('请配置 TAICHU_API_KEY')
# 开始接收回复
from .com_taichu import TaichuChatInit
zhipu_bro_init = TaichuChatInit()
for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, sys_prompt):
if len(observe_window) >= 1:
observe_window[0] = response
if len(observe_window) >= 2:
if (time.time() - observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
return response
def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
"""
⭐单线程方法
函数的说明请见 request_llms/bridge_all.py
"""
chatbot.append([inputs, ""])
yield from update_ui(chatbot=chatbot, history=history)
if validate_key() is False:
yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0)
return
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
chatbot[-1] = [inputs, ""]
yield from update_ui(chatbot=chatbot, history=history)
# if llm_kwargs["llm_model"] == "taichu":
# llm_kwargs["llm_model"] = taichu_default_model
# 开始接收回复
from .com_taichu import TaichuChatInit
zhipu_bro_init = TaichuChatInit()
for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, system_prompt):
chatbot[-1] = [inputs, response]
yield from update_ui(chatbot=chatbot, history=history)
history.extend([inputs, response])
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
yield from update_ui(chatbot=chatbot, history=history)


@@ -75,6 +75,10 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
llm_kwargs["llm_model"] = zhipuai_default_model
if llm_kwargs["llm_model"] in ["glm-4v"]:
if (len(inputs) + sum(len(temp) for temp in history) + 1047) > 2000:
chatbot.append((inputs, "上下文长度超过glm-4v上限2000tokens,注意图片大约占用1,047个tokens"))
yield from update_ui(chatbot=chatbot, history=history)
return
have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
if not have_recent_file:
chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写"))


@@ -65,8 +65,12 @@ class QwenRequestInstance():
self.result_buf += f"[Local Message] 请求错误:状态码:{response.status_code},错误码:{response.code},消息:{response.message}"
yield self.result_buf
break
logging.info(f'[raw_input] {inputs}')
logging.info(f'[response] {self.result_buf}')
# 耗尽generator避免报错
while True:
try: next(responses)
except: break
return self.result_buf


@@ -1,95 +1,95 @@
from toolbox import get_conf
import threading
import logging
import os
timeout_bot_msg = '[Local Message] Request timeout. Network error.'
#os.environ['VOLC_ACCESSKEY'] = ''
#os.environ['VOLC_SECRETKEY'] = ''
class YUNQUERequestInstance():
def __init__(self):
self.time_to_yield_event = threading.Event()
self.time_to_exit_event = threading.Event()
self.result_buf = ""
def generate(self, inputs, llm_kwargs, history, system_prompt):
# import _thread as thread
from volcengine.maas import MaasService, MaasException
maas = MaasService('maas-api.ml-platform-cn-beijing.volces.com', 'cn-beijing')
YUNQUE_SECRET_KEY, YUNQUE_ACCESS_KEY,YUNQUE_MODEL = get_conf("YUNQUE_SECRET_KEY", "YUNQUE_ACCESS_KEY","YUNQUE_MODEL")
maas.set_ak(YUNQUE_ACCESS_KEY) #填写 VOLC_ACCESSKEY
maas.set_sk(YUNQUE_SECRET_KEY) #填写 'VOLC_SECRETKEY'
self.result_buf = ""
req = {
"model": {
"name": YUNQUE_MODEL,
"version": "1.0", # use default version if not specified.
},
"parameters": {
"max_new_tokens": 4000, # 输出文本的最大tokens限制
"min_new_tokens": 1, # 输出文本的最小tokens限制
"temperature": llm_kwargs['temperature'], # 用于控制生成文本的随机性和创造性,Temperature值越大随机性越大,取值范围0~1
"top_p": llm_kwargs['top_p'], # 用于控制输出tokens的多样性,TopP值越大输出的tokens类型越丰富,取值范围0~1
"top_k": 0, # 选择预测值最大的k个token进行采样,取值范围0-1000,0表示不生效
"max_prompt_tokens": 4000, # 最大输入 token 数,如果给出的 prompt 的 token 长度超过此限制,取最后 max_prompt_tokens 个 token 输入模型。
},
"messages": self.generate_message_payload(inputs, llm_kwargs, history, system_prompt)
}
response = maas.stream_chat(req)
for resp in response:
self.result_buf += resp.choice.message.content
yield self.result_buf
'''
for event in response.events():
if event.event == "add":
self.result_buf += event.data
yield self.result_buf
elif event.event == "error" or event.event == "interrupted":
raise RuntimeError("Unknown error:" + event.data)
elif event.event == "finish":
yield self.result_buf
break
else:
raise RuntimeError("Unknown error:" + str(event))
logging.info(f'[raw_input] {inputs}')
logging.info(f'[response] {self.result_buf}')
'''
return self.result_buf
def generate_message_payload(self, inputs, llm_kwargs, history, system_prompt):
from volcengine.maas import ChatRole
conversation_cnt = len(history) // 2
messages = [{"role": ChatRole.USER, "content": system_prompt},
{"role": ChatRole.ASSISTANT, "content": "Certainly!"}]
if conversation_cnt:
for index in range(0, 2 * conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = ChatRole.USER
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = ChatRole.ASSISTANT
what_gpt_answer["content"] = history[index + 1]
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "":
continue
if what_gpt_answer["content"] == timeout_bot_msg:
continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {}
what_i_ask_now["role"] = ChatRole.USER
what_i_ask_now["content"] = inputs
messages.append(what_i_ask_now)
return messages


@@ -67,6 +67,7 @@ class SparkRequestInstance():
self.gpt_url_v3 = "ws://spark-api.xf-yun.com/v3.1/chat"
self.gpt_url_v35 = "wss://spark-api.xf-yun.com/v3.5/chat"
self.gpt_url_img = "wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image"
self.gpt_url_v4 = "wss://spark-api.xf-yun.com/v4.0/chat"
self.time_to_yield_event = threading.Event()
self.time_to_exit_event = threading.Event()
@@ -94,6 +95,8 @@ class SparkRequestInstance():
gpt_url = self.gpt_url_v3
elif llm_kwargs['llm_model'] == 'sparkv3.5':
gpt_url = self.gpt_url_v35
elif llm_kwargs['llm_model'] == 'sparkv4':
gpt_url = self.gpt_url_v4
else:
gpt_url = self.gpt_url
file_manifest = []
@@ -194,6 +197,7 @@ def gen_params(appid, inputs, llm_kwargs, history, system_prompt, file_manifest)
"sparkv2": "generalv2",
"sparkv3": "generalv3",
"sparkv3.5": "generalv3.5",
"sparkv4": "4.0Ultra"
}
domains_select = domains[llm_kwargs['llm_model']]
if file_manifest: domains_select = 'image'
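# e.g. llm_kwargs['llm_model'] == 'sparkv4' now routes to wss://spark-api.xf-yun.com/v4.0/chat
# with domain "4.0Ultra", per the two mappings added above.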

request_llms/com_taichu.py (new file)

@@ -0,0 +1,56 @@
# encoding: utf-8
# @Time : 2024/1/22
# @Author : Kilig947 & binary husky
# @Descr : 兼容最新的智谱Ai
from toolbox import get_conf, encode_image, get_pictures_list
import logging, os, requests
import json
class TaichuChatInit:
def __init__(self): ...
def __conversation_user(self, user_input: str, llm_kwargs:dict):
return {"role": "user", "content": user_input}
def __conversation_history(self, history:list, llm_kwargs:dict):
messages = []
conversation_cnt = len(history) // 2
if conversation_cnt:
for index in range(0, 2 * conversation_cnt, 2):
what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
what_gpt_answer = {
"role": "assistant",
"content": history[index + 1]
}
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
return messages
def generate_chat(self, inputs:str, llm_kwargs:dict, history:list, system_prompt:str):
TAICHU_API_KEY = get_conf("TAICHU_API_KEY")
params = {
'api_key': TAICHU_API_KEY,
'model_code': 'taichu_llm',
'question': '\n\n'.join(history + [inputs]),
'prefix': system_prompt,
'temperature': llm_kwargs.get('temperature', 0.95),
'stream_format': 'json'
}
api = 'https://ai-maas.wair.ac.cn/maas/v1/model_api/invoke'
response = requests.post(api, json=params, stream=True)
results = ""
if response.status_code == 200:
response.encoding = 'utf-8'
for line in response.iter_lines(decode_unicode=True):
try: delta = json.loads(line)['data']['content']
except: delta = json.loads(line)['choices'][0]['text']
results += delta
yield delta, results
else:
raise ValueError(f"Taichu API 请求失败,状态码:{response.status_code}")
if __name__ == '__main__':
zhipu = TaichuChatInit()
zhipu.generate_chat('你好', {'llm_model': 'glm-4'}, [], '你是WPSAi')
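# The line parser above tolerates two stream formats (assumed from the try/except):
#   {"data": {"content": "..."}}        -- Taichu's native json stream
#   {"choices": [{"text": "..."}]}      -- an OpenAI-completions-like fallback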


@@ -36,8 +36,14 @@ class ZhipuChatInit:
what_i_have_asked = {"role": "user", "content": []}
what_i_have_asked['content'].append({"type": 'text', "text": user_input})
if encode_img:
if len(encode_img) > 1:
logging.warning("glm-4v只支持一张图片,将只取第一张图片进行处理")
print("glm-4v只支持一张图片,将只取第一张图片进行处理")
img_d = {"type": "image_url",
"image_url": {'url': encode_img}}
"image_url": {
"url": encode_img[0]['data']
}
}
what_i_have_asked['content'].append(img_d)
return what_i_have_asked


@@ -0,0 +1,409 @@
import json
import time
import logging
import traceback
import requests
# Put your secrets, such as API keys and proxy URLs, in config_private.py
# On load, the private config_private file (not tracked by git) is checked first; if it exists, it overrides the original config file
from toolbox import (
get_conf,
update_ui,
is_the_upload_folder,
)
proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf(
"proxies", "TIMEOUT_SECONDS", "MAX_RETRY"
)
timeout_bot_msg = (
"[Local Message] Request timeout. Network error. Please check proxy settings in config.py."
+ "网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。"
)
def get_full_error(chunk, stream_response):
"""
Try to retrieve the complete error message
"""
while True:
try:
chunk += next(stream_response)
except:
break
return chunk
def decode_chunk(chunk):
"""
Decode the "content" and "finish_reason" fields of a streamed chunk
"""
chunk = chunk.decode()
respose = ""
finish_reason = "False"
try:
chunk = json.loads(chunk[6:])
except:
respose = ""
finish_reason = chunk
# error-handling section
if "error" in chunk:
respose = "API_ERROR"
try:
chunk = json.loads(chunk)
finish_reason = chunk["error"]["code"]
except:
finish_reason = "API_ERROR"
return respose, finish_reason
try:
respose = chunk["choices"][0]["delta"]["content"]
except:
pass
try:
finish_reason = chunk["choices"][0]["finish_reason"]
except:
pass
return respose, finish_reason
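For reference, a worked example of `decode_chunk` on a typical streaming line in the OpenAI SSE format this module assumes (the payload is illustrative):

```python
# One server-sent-events line; decode_chunk strips the 6-byte "data: "
# prefix, then pulls out the delta text and the finish_reason.
chunk = b'data: {"choices": [{"delta": {"content": "Hello"}, "finish_reason": null}]}'
print(decode_chunk(chunk))  # -> ('Hello', None)
```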
def generate_message(input, model, key, history, max_output_token, system_prompt, temperature):
"""
Assemble all information, select the LLM model, and build the HTTP request in preparation for sending
"""
api_key = f"Bearer {key}"
headers = {"Content-Type": "application/json", "Authorization": api_key}
conversation_cnt = len(history) // 2
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2 * conversation_cnt, 2):
what_i_have_asked = {}
what_i_have_asked["role"] = "user"
what_i_have_asked["content"] = history[index]
what_gpt_answer = {}
what_gpt_answer["role"] = "assistant"
what_gpt_answer["content"] = history[index + 1]
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "":
continue
if what_gpt_answer["content"] == timeout_bot_msg:
continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]["content"] = what_gpt_answer["content"]
what_i_ask_now = {}
what_i_ask_now["role"] = "user"
what_i_ask_now["content"] = input
messages.append(what_i_ask_now)
playload = {
"model": model,
"messages": messages,
"temperature": temperature,
"stream": True,
"max_tokens": max_output_token,
}
try:
print(f" {model} : {conversation_cnt} : {input[:100]} ..........")
except:
print("输入中可能存在乱码。")
return headers, playload
def get_predict_function(
api_key_conf_name,
max_output_token,
disable_proxy = False
):
"""
Generate predict functions for an OpenAI-format API. Parameters:
api_key_conf_name
the name of this model's API key in `config.py`, e.g. "YIMODEL_API_KEY"
max_output_token
the maximum number of tokens per request, e.g. 4096 for 01.AI's yi-34b-chat-200k;
do not confuse this with the model's maximum context length.
disable_proxy
whether to bypass the proxy: True means no proxy, False means use the proxy.
"""
APIKEY = get_conf(api_key_conf_name)
def predict_no_ui_long_connection(
inputs,
llm_kwargs,
history=[],
sys_prompt="",
observe_window=None,
console_slience=False,
):
"""
Send the query and wait for the complete reply in one pass, without showing intermediate progress; streaming is still used internally so the connection is not dropped midway.
inputs
the input of this query
sys_prompt:
the silent system prompt
llm_kwargs
internal tuning parameters for the LLM
history
the list of previous dialogue turns
observe_window = None
used to pass partial output across threads; mostly for fancy visual effects and can be left empty. observe_window[0]: observation window. observe_window[1]: watchdog
"""
watch_dog_patience = 5 # watchdog patience: allow 5 seconds without feeding before it bites
if len(APIKEY) == 0:
raise RuntimeError(f"APIKEY为空,请检查配置文件的{APIKEY}")
if inputs == "":
inputs = "你好👋"
headers, playload = generate_message(
input=inputs,
model=llm_kwargs["llm_model"],
key=APIKEY,
history=history,
max_output_token=max_output_token,
system_prompt=sys_prompt,
temperature=llm_kwargs["temperature"],
)
retry = 0
while True:
try:
from .bridge_all import model_info
endpoint = model_info[llm_kwargs["llm_model"]]["endpoint"]
if not disable_proxy:
response = requests.post(
endpoint,
headers=headers,
proxies=proxies,
json=playload,
stream=True,
timeout=TIMEOUT_SECONDS,
)
else:
response = requests.post(
endpoint,
headers=headers,
json=playload,
stream=True,
timeout=TIMEOUT_SECONDS,
)
break
except:
retry += 1
traceback.print_exc()
if retry > MAX_RETRY:
raise TimeoutError
if MAX_RETRY != 0:
print(f"请求超时,正在重试 ({retry}/{MAX_RETRY}) ……")
stream_response = response.iter_lines()
result = ""
finish_reason = ""
while True:
try:
chunk = next(stream_response)
except StopIteration:
if result == "":
raise RuntimeError(f"获得空的回复,可能原因:{finish_reason}")
break
except requests.exceptions.ConnectionError:
chunk = next(stream_response) # failed; retry once, and if it fails again there is nothing more we can do
response_text, finish_reason = decode_chunk(chunk)
# the first chunk of the returned stream may be empty; keep waiting
if response_text == "" and finish_reason != "False":
continue
if response_text == "API_ERROR" and (
finish_reason != "False" or finish_reason != "stop"
):
chunk = get_full_error(chunk, stream_response)
chunk_decoded = chunk.decode()
print(chunk_decoded)
raise RuntimeError(
f"API异常,请检测终端输出。可能的原因是:{finish_reason}"
)
if chunk:
try:
if finish_reason == "stop":
logging.info(f"[response] {result}")
break
result += response_text
if not console_slience:
print(response_text, end="")
if observe_window is not None:
# observation window: push the data received so far out for display
if len(observe_window) >= 1:
observe_window[0] += response_text
# watchdog: terminate if it has not been fed within the deadline
if len(observe_window) >= 2:
if (time.time() - observe_window[1]) > watch_dog_patience:
raise RuntimeError("用户取消了程序。")
except Exception as e:
chunk = get_full_error(chunk, stream_response)
chunk_decoded = chunk.decode()
error_msg = chunk_decoded
print(error_msg)
raise RuntimeError("Json解析不合常规")
return result
def predict(
inputs,
llm_kwargs,
plugin_kwargs,
chatbot,
history=[],
system_prompt="",
stream=True,
additional_fn=None,
):
"""
Send the query and fetch the output as a stream, for the basic chat functionality.
inputs is the input of this query
top_p, temperature are internal tuning parameters of the LLM
history is the list of previous dialogue turns (note: if either inputs or history is too long, a token-overflow error will be triggered)
chatbot is the dialogue list shown in the WebUI; modify it and yield to update the chat interface directly
additional_fn indicates which button was clicked; see functional.py
"""
if len(APIKEY) == 0:
raise RuntimeError(f"APIKEY为空,请检查配置文件的{APIKEY}")
if inputs == "":
inputs = "你好👋"
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(
additional_fn, inputs, history, chatbot
)
logging.info(f"[raw_input] {inputs}")
chatbot.append((inputs, ""))
yield from update_ui(
chatbot=chatbot, history=history, msg="等待响应"
) # refresh the UI
# check mis-behavior
if is_the_upload_folder(inputs):
chatbot[-1] = (
inputs,
f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。",
)
yield from update_ui(
chatbot=chatbot, history=history, msg="正常"
) # refresh the UI
time.sleep(2)
headers, playload = generate_message(
input=inputs,
model=llm_kwargs["llm_model"],
key=APIKEY,
history=history,
max_output_token=max_output_token,
system_prompt=system_prompt,
temperature=llm_kwargs["temperature"],
)
history.append(inputs)
history.append("")
retry = 0
while True:
try:
from .bridge_all import model_info
endpoint = model_info[llm_kwargs["llm_model"]]["endpoint"]
if not disable_proxy:
response = requests.post(
endpoint,
headers=headers,
proxies=proxies,
json=playload,
stream=True,
timeout=TIMEOUT_SECONDS,
)
else:
response = requests.post(
endpoint,
headers=headers,
json=playload,
stream=True,
timeout=TIMEOUT_SECONDS,
)
break
except:
retry += 1
chatbot[-1] = (chatbot[-1][0], timeout_bot_msg)
retry_msg = (
f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
)
yield from update_ui(
chatbot=chatbot, history=history, msg="请求超时" + retry_msg
) # refresh the UI
if retry > MAX_RETRY:
raise TimeoutError
gpt_replying_buffer = ""
stream_response = response.iter_lines()
while True:
try:
chunk = next(stream_response)
except StopIteration:
break
except requests.exceptions.ConnectionError:
chunk = next(stream_response) # failed; retry once, and if it fails again there is nothing more we can do
response_text, finish_reason = decode_chunk(chunk)
# the first chunk of the returned stream may be empty; keep waiting
if response_text == "" and finish_reason != "False":
status_text = f"finish_reason: {finish_reason}"
yield from update_ui(
chatbot=chatbot, history=history, msg=status_text
)
continue
if chunk:
try:
if response_text == "API_ERROR" and (
finish_reason != "False" or finish_reason != "stop"
):
chunk = get_full_error(chunk, stream_response)
chunk_decoded = chunk.decode()
chatbot[-1] = (
chatbot[-1][0],
"[Local Message] {finish_reason},获得以下报错信息:\n"
+ chunk_decoded,
)
yield from update_ui(
chatbot=chatbot,
history=history,
msg="API异常:" + chunk_decoded,
) # refresh the UI
print(chunk_decoded)
return
if finish_reason == "stop":
logging.info(f"[response] {gpt_replying_buffer}")
break
status_text = f"finish_reason: {finish_reason}"
gpt_replying_buffer += response_text
# if an exception is raised here, the text is usually too long; see the output of get_full_error for details
history[-1] = gpt_replying_buffer
chatbot[-1] = (history[-2], history[-1])
yield from update_ui(
chatbot=chatbot, history=history, msg=status_text
) # refresh the UI
except Exception as e:
yield from update_ui(
chatbot=chatbot, history=history, msg="Json解析不合常规"
) # refresh the UI
chunk = get_full_error(chunk, stream_response)
chunk_decoded = chunk.decode()
chatbot[-1] = (
chatbot[-1][0],
"[Local Message] 解析错误,获得以下报错信息:\n" + chunk_decoded,
)
yield from update_ui(
chatbot=chatbot, history=history, msg="Json异常" + chunk_decoded
) # refresh the UI
print(chunk_decoded)
return
return predict_no_ui_long_connection, predict
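For context, this is roughly how the factory is wired up for a new OpenAI-compatible backend; the key name below comes from the docstring's own example, and the token cap is illustrative:

```python
# Build the two bridge functions for one backend; the endpoint itself is
# resolved later from model_info in bridge_all.
predict_no_ui_long_connection, predict = get_predict_function(
    api_key_conf_name="YIMODEL_API_KEY",  # name of the key in config.py
    max_output_token=4096,                # per-request output cap, not context size
    disable_proxy=False,                  # route requests through the configured proxy
)
```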


@@ -1,7 +1,8 @@
https://public.agent-matrix.com/publish/gradio-3.32.9-py3-none-any.whl
https://public.agent-matrix.com/publish/gradio-3.32.10-py3-none-any.whl
fastapi==0.110
gradio-client==0.8
pypdf2==2.12.1
zhipuai>=2
zhipuai==2.0.1
tiktoken>=0.3.3
requests[socks]
pydantic==2.5.2
@@ -22,8 +23,10 @@ pyautogen
colorama
Markdown
pygments
edge-tts
pymupdf
openai
rjsmin
arxiv
numpy
rich
rich


@@ -46,6 +46,16 @@ code_highlight_configs_block_mermaid = {
},
}
mathpatterns = {
r"(?<!\\|\$)(\$)([^\$]+)(\$)": {"allow_multi_lines": False}, #  $...$
r"(?<!\\)(\$\$)([^\$]+)(\$\$)": {"allow_multi_lines": True}, # $$...$$
r"(?<!\\)(\\\[)(.+?)(\\\])": {"allow_multi_lines": False}, # \[...\]
r'(?<!\\)(\\\()(.+?)(\\\))': {'allow_multi_lines': False}, # \(...\)
# r'(?<!\\)(\\begin{([a-z]+?\*?)})(.+?)(\\end{\2})': {'allow_multi_lines': True}, # \begin...\end
# r'(?<!\\)(\$`)([^`]+)(`\$)': {'allow_multi_lines': False}, # $`...`$
}
def tex2mathml_catch_exception(content, *args, **kwargs):
try:
content = tex2mathml(content, *args, **kwargs)
@@ -96,14 +106,7 @@ def is_equation(txt):
return False
if "$" not in txt and "\\[" not in txt:
return False
mathpatterns = {
r"(?<!\\|\$)(\$)([^\$]+)(\$)": {"allow_multi_lines": False}, #  $...$
r"(?<!\\)(\$\$)([^\$]+)(\$\$)": {"allow_multi_lines": True}, # $$...$$
r"(?<!\\)(\\\[)(.+?)(\\\])": {"allow_multi_lines": False}, # \[...\]
# r'(?<!\\)(\\\()(.+?)(\\\))': {'allow_multi_lines': False}, # \(...\)
# r'(?<!\\)(\\begin{([a-z]+?\*?)})(.+?)(\\end{\2})': {'allow_multi_lines': True}, # \begin...\end
# r'(?<!\\)(\$`)([^`]+)(`\$)': {'allow_multi_lines': False}, # $`...`$
}
matches = []
for pattern, property in mathpatterns.items():
flags = re.ASCII | re.DOTALL if property["allow_multi_lines"] else re.ASCII
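To make the pattern semantics concrete, here is a small self-contained sketch that applies the same flag logic as the module (the sample input is illustrative):

```python
import re

mathpatterns = {
    r"(?<!\\|\$)(\$)([^\$]+)(\$)": {"allow_multi_lines": False},  # $...$
    r"(?<!\\)(\$\$)([^\$]+)(\$\$)": {"allow_multi_lines": True},  # $$...$$
    r"(?<!\\)(\\\[)(.+?)(\\\])": {"allow_multi_lines": False},    # \[...\]
    r"(?<!\\)(\\\()(.+?)(\\\))": {"allow_multi_lines": False},    # \(...\)
}

def find_equations(txt):
    """Return the bodies of all equations matched by the patterns above."""
    found = []
    for pattern, prop in mathpatterns.items():
        # multi-line patterns get DOTALL so '.' can cross newlines
        flags = re.ASCII | re.DOTALL if prop["allow_multi_lines"] else re.ASCII
        found += [m.group(2) for m in re.finditer(pattern, txt, flags)]
    return found

print(find_equations(r"inline \(a+b\) and display $$\int_0^1 x\,dx$$"))
# -> ['\\int_0^1 x\\,dx', 'a+b']  (order follows the pattern dict)
```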
@@ -207,6 +210,118 @@ def fix_code_segment_indent(txt):
return txt
def fix_dollar_sticking_bug(txt):
"""
Fix problems caused by non-standard dollar-sign formula delimiters
"""
txt_result = ""
single_stack_height = 0
double_stack_height = 0
while True:
while True:
index = txt.find('$')
if index == -1:
txt_result += txt
return txt_result
if single_stack_height > 0:
if txt[:(index+1)].find('\n') > 0 or txt[:(index+1)].find('<td>') > 0 or txt[:(index+1)].find('</td>') > 0:
print('公式之中出现了异常 (Unexpected element in equation)')
single_stack_height = 0
txt_result += ' $'
continue
if double_stack_height > 0:
if txt[:(index+1)].find('\n\n') > 0:
print('公式之中出现了异常 (Unexpected element in equation)')
double_stack_height = 0
txt_result += '$$'
continue
is_double = (txt[index+1] == '$')
if is_double:
if single_stack_height != 0:
# add a padding
txt = txt[:(index+1)] + " " + txt[(index+1):]
continue
if double_stack_height == 0:
double_stack_height = 1
else:
double_stack_height = 0
txt_result += txt[:(index+2)]
txt = txt[(index+2):]
else:
if double_stack_height != 0:
# print(txt[:(index)])
print('发现异常嵌套公式')
if single_stack_height == 0:
single_stack_height = 1
else:
single_stack_height = 0
# print(txt[:(index)])
txt_result += txt[:(index+1)]
txt = txt[(index+1):]
break
def markdown_convertion_for_file(txt):
"""
Convert Markdown text to HTML. If it contains math formulas, convert the formulas to HTML first.
"""
from themes.theme import advanced_css
pre = f"""
<!DOCTYPE html><head><meta charset="utf-8"><title>GPT-Academic输出文档</title><style>{advanced_css}</style></head>
<body>
<div class="test_temp1" style="width:10%; height: 500px; float:left;"></div>
<div class="test_temp2" style="width:80%;padding: 40px;float:left;padding-left: 20px;padding-right: 20px;box-shadow: rgba(0, 0, 0, 0.2) 0px 0px 8px 8px;border-radius: 10px;">
<div class="markdown-body">
"""
suf = """
</div>
</div>
<div class="test_temp3" style="width:10%; height: 500px; float:left;"></div>
</body>
"""
if txt.startswith(pre) and txt.endswith(suf):
# print('warning: the input has already been converted; converting it again may cause problems')
return txt # already converted, no need to convert again
find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
txt = fix_markdown_indent(txt)
convert_stage_1 = fix_dollar_sticking_bug(txt)
# convert everything to html format
convert_stage_2 = markdown.markdown(
text=convert_stage_1,
extensions=[
"sane_lists",
"tables",
"mdx_math",
"pymdownx.superfences",
"pymdownx.highlight",
],
extension_configs={**markdown_extension_configs, **code_highlight_configs},
)
def repl_fn(match):
content = match.group(2)
return f'<script type="math/tex">{content}</script>'
pattern = "|".join([pattern for pattern, property in mathpatterns.items() if not property["allow_multi_lines"]])
pattern = re.compile(pattern, flags=re.ASCII)
convert_stage_3 = pattern.sub(repl_fn, convert_stage_2)
convert_stage_4 = markdown_bug_hunt(convert_stage_3)
# 2. convert to rendered equation
convert_stage_5, n = re.subn(
find_equation_pattern, replace_math_render, convert_stage_4, flags=re.DOTALL
)
# cat them together
return pre + convert_stage_5 + suf
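A minimal usage sketch of the new exporter, assuming the module path used by the test file later in this diff (the sample Markdown is illustrative):

```python
from shared_utils.advanced_markdown_format import markdown_convertion_for_file

md = "# Title\n\nInline math $E = mc^2$ and **bold** text.\n"
html = markdown_convertion_for_file(md)  # returns a complete standalone HTML page
with open("preview.html", "w", encoding="utf-8") as f:
    f.write(html)  # open in a browser to inspect the rendered result
```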
@lru_cache(maxsize=128) # use an LRU cache to speed up conversion
def markdown_convertion(txt):
"""
@@ -358,4 +473,4 @@ def format_io(self, y):
# 输出部分
None if gpt_reply is None else markdown_convertion(gpt_reply),
)
return y
return y


@@ -0,0 +1,25 @@
def is_full_width_char(ch):
"""判断给定的单个字符是否是全角字符"""
if '\u4e00' <= ch <= '\u9fff':
return True # Chinese characters
if '\uff01' <= ch <= '\uff5e':
return True # full-width symbols
if '\u3000' <= ch <= '\u303f':
return True # CJK punctuation
return False
def scolling_visual_effect(text, scroller_max_len):
text = text.\
replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')
place_take_cnt = 0
pointer = len(text) - 1
if len(text) < scroller_max_len:
return text
while place_take_cnt < scroller_max_len and pointer > 0:
if is_full_width_char(text[pointer]): place_take_cnt += 2
else: place_take_cnt += 1
pointer -= 1
return text[pointer:]
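Assuming the function above is importable, a quick demonstration of the cell-based trimming; full-width characters count as two display cells, so the returned tail is measured in cells rather than characters (the sample string is illustrative):

```python
# Flattens newlines/backticks/spaces to dots, then keeps roughly the last
# `scroller_max_len` display cells of the text.
marquee = scolling_visual_effect("Status: 正在处理文件, almost done", scroller_max_len=12)
print(marquee)  # the tail of the flattened text, at most ~12 cells wide
```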


@@ -2,7 +2,7 @@ import importlib
import time
import os
from functools import lru_cache
from colorful import print亮红, print亮绿, print亮蓝
from shared_utils.colorful import print亮红, print亮绿, print亮蓝
pj = os.path.join
default_user_name = 'default_user'


@@ -15,13 +15,13 @@ import os
def get_plugin_handle(plugin_name):
"""
e.g. plugin_name = 'crazy_functions.批量Markdown翻译->Markdown翻译指定语言'
e.g. plugin_name = 'crazy_functions.Markdown_Translate->Markdown翻译指定语言'
"""
import importlib
assert (
"->" in plugin_name
), "Example of plugin_name: crazy_functions.批量Markdown翻译->Markdown翻译指定语言"
), "Example of plugin_name: crazy_functions.Markdown_Translate->Markdown翻译指定语言"
module, fn_name = plugin_name.split("->")
f_hot_reload = getattr(importlib.import_module(module, fn_name), fn_name)
return f_hot_reload
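A quick resolution sketch using the renamed module path from the assertion above (whether the import succeeds depends on your checkout):

```python
# Resolve "module->function" into a callable plugin entry point.
fn = get_plugin_handle("crazy_functions.Markdown_Translate->Markdown翻译指定语言")
print(callable(fn))  # True
```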


@@ -1,4 +1,7 @@
import json
import base64
from typing import Callable
def load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)->Callable:
def load_web_cookie_cache(persistent_cookie_, cookies_):
import gradio as gr
@@ -22,7 +25,6 @@ def load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)-
return ret
return load_web_cookie_cache
def assign_btn__fn_builder(customize_btns, predefined_btns, cookies, web_cookie_cache)->Callable:
def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix, clean_up=False):
import gradio as gr
@@ -59,3 +61,84 @@ def assign_btn__fn_builder(customize_btns, predefined_btns, cookies, web_cookie_
return ret
return assign_btn
# cookies, web_cookie_cache = make_cookie_cache()
def make_cookie_cache():
# define two siblings: the backend state `cookies` and the frontend `web_cookie_cache`
import gradio as gr
from toolbox import load_chat_cookies
# the backend state holding the cookies
cookies = gr.State(load_chat_cookies())
# a hidden frontend storage twin of the cookies
web_cookie_cache = gr.Textbox(visible=False, elem_id="web_cookie_cache")
return cookies, web_cookie_cache
# history, history_cache, history_cache_update = make_history_cache()
def make_history_cache():
# define three siblings: the backend state `history`, the frontend `history_cache`, and the backend setter `history_cache_update`
import gradio as gr
# the backend state holding the history
history = gr.State([])
# a hidden frontend storage twin of the history
history_cache = gr.Textbox(visible=False, elem_id="history_cache")
# a hidden updater from history_cache to history: triggering this button first runs js code to update history_cache, then runs python code to update history
def process_history_cache(history_cache):
return json.loads(history_cache)
# a simpler setter approach
history_cache_update = gr.Button("", elem_id="elem_update_history", visible=False).click(
process_history_cache, inputs=[history_cache], outputs=[history])
return history, history_cache, history_cache_update
# """
# with gr.Row():
# txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
# txtx = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
# with gr.Row():
# btn_value = "Test"
# elem_id = "TestCase"
# variant = "primary"
# input_list = [txt, txtx]
# output_list = [txt, txtx]
# input_name_list = ["txt(input)", "txtx(input)"]
# output_name_list = ["txt", "txtx"]
# js_callback = """(txt, txtx)=>{console.log(txt); console.log(txtx);}"""
# def function(txt, txtx):
# return "booo", "goooo"
# create_button_with_javascript_callback(btn_value, elem_id, variant, js_callback, input_list, output_list, function, input_name_list, output_name_list)
# """
def create_button_with_javascript_callback(btn_value, elem_id, variant, js_callback, input_list, output_list, function, input_name_list, output_name_list):
import gradio as gr
middle_ware_component = gr.Textbox(visible=False, elem_id=elem_id+'_buffer')
def get_fn_wrap():
def fn_wrap(*args):
summary_dict = {}
for name, value in zip(input_name_list, args):
summary_dict.update({name: value})
res = function(*args)
for name, value in zip(output_name_list, res):
summary_dict.update({name: value})
summary = base64.b64encode(json.dumps(summary_dict).encode('utf8')).decode("utf-8")
return (*res, summary)
return fn_wrap
btn = gr.Button(btn_value, elem_id=elem_id, variant=variant)
call_args = ""
for name in output_name_list:
call_args += f"""Data["{name}"],"""
call_args = call_args.rstrip(",")
_js_callback = """
(base64MiddleString)=>{
console.log('hello')
const stringData = atob(base64MiddleString);
let Data = JSON.parse(stringData);
call = JS_CALLBACK_GEN;
call(CALL_ARGS);
}
""".replace("JS_CALLBACK_GEN", js_callback).replace("CALL_ARGS", call_args)
btn.click(get_fn_wrap(), input_list, output_list+[middle_ware_component]).then(None, [middle_ware_component], None, _js=_js_callback)
return btn


@@ -47,6 +47,28 @@ queue cocurrent effectiveness
import os, requests, threading, time
import uvicorn
def validate_path_safety(path_or_url, user):
from toolbox import get_conf, default_user_name
from toolbox import FriendlyException
PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')
sensitive_path = None
path_or_url = os.path.relpath(path_or_url)
if path_or_url.startswith(PATH_LOGGING): # log files (split per user)
sensitive_path = PATH_LOGGING
elif path_or_url.startswith(PATH_PRIVATE_UPLOAD): # user upload directories (split per user)
sensitive_path = PATH_PRIVATE_UPLOAD
elif path_or_url.startswith('tests') or path_or_url.startswith('build'): # commonly used test directories
return True
else:
raise FriendlyException(f"输入文件的路径 ({path_or_url}) 存在,但位置非法。请将文件上传后再执行该任务。") # return False
if sensitive_path:
allowed_users = [user, 'autogen', 'arxiv_cache', default_user_name] # user paths that may be accessed
for user_allowed in allowed_users:
if f"{os.sep}".join(path_or_url.split(os.sep)[:2]) == os.path.join(sensitive_path, user_allowed):
return True
raise FriendlyException(f"输入文件的路径 ({path_or_url}) 存在,但属于其他用户。请将文件上传后再执行该任务。") # return False
return True
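A sketch of the intended call pattern, assuming the default `PATH_PRIVATE_UPLOAD` value of `private_upload` (paths and user names are illustrative):

```python
from toolbox import FriendlyException

try:
    # a file inside the caller's own upload directory passes the check
    validate_path_safety("private_upload/alice/2024-08-02/paper.pdf", user="alice")
    # a file under another user's directory raises FriendlyException
    validate_path_safety("private_upload/bob/2024-08-02/paper.pdf", user="alice")
except FriendlyException as e:
    print(e)
```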
def _authorize_user(path_or_url, request, gradio_app):
from toolbox import get_conf, default_user_name
PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')
@@ -59,7 +81,7 @@ def _authorize_user(path_or_url, request, gradio_app):
if sensitive_path:
token = request.cookies.get("access-token") or request.cookies.get("access-token-unsecure")
user = gradio_app.tokens.get(token) # get user
allowed_users = [user, 'autogen', default_user_name] # user paths that may be accessed
allowed_users = [user, 'autogen', 'arxiv_cache', default_user_name] # user paths that may be accessed
for user_allowed in allowed_users:
# exact match
if f"{os.sep}".join(path_or_url.split(os.sep)[:2]) == os.path.join(sensitive_path, user_allowed):
@@ -77,7 +99,7 @@ class Server(uvicorn.Server):
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start()
while not self.started:
time.sleep(1e-3)
time.sleep(5e-2)
def close(self):
self.should_exit = True
@@ -137,6 +159,60 @@ def start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SS
return "越权访问!"
return await endpoint(path_or_url, request)
from fastapi import Request, status
from fastapi.responses import FileResponse, RedirectResponse
@gradio_app.get("/academic_logout")
async def logout():
response = RedirectResponse(url=CUSTOM_PATH, status_code=status.HTTP_302_FOUND)
response.delete_cookie('access-token')
response.delete_cookie('access-token-unsecure')
return response
# --- --- enable TTS (text-to-speech) functionality --- ---
TTS_TYPE = get_conf("TTS_TYPE")
if TTS_TYPE != "DISABLE":
# audio generation functionality
import httpx
from fastapi import FastAPI, Request, HTTPException
from starlette.responses import Response
async def forward_request(request: Request, method: str) -> Response:
async with httpx.AsyncClient() as client:
try:
# Forward the request to the target service
if TTS_TYPE == "EDGE_TTS":
import tempfile
import edge_tts
import wave
import uuid
from pydub import AudioSegment
json = await request.json()
voice = get_conf("EDGE_TTS_VOICE")
tts = edge_tts.Communicate(text=json['text'], voice=voice)
temp_folder = tempfile.gettempdir()
temp_file_name = str(uuid.uuid4().hex)
temp_file = os.path.join(temp_folder, f'{temp_file_name}.mp3')
await tts.save(temp_file)
try:
mp3_audio = AudioSegment.from_file(temp_file, format="mp3")
mp3_audio.export(temp_file, format="wav")
with open(temp_file, 'rb') as wav_file: t = wav_file.read()
os.remove(temp_file)
return Response(content=t)
except:
raise RuntimeError("ffmpeg未安装,无法处理EdgeTTS音频。安装方法见`https://github.com/jiaaro/pydub#getting-ffmpeg-set-up`")
if TTS_TYPE == "LOCAL_SOVITS_API":
# Forward the request to the target service
TARGET_URL = get_conf("GPT_SOVITS_URL")
body = await request.body()
resp = await client.post(TARGET_URL, content=body, timeout=60)
# Return the response from the target service
return Response(content=resp.content, status_code=resp.status_code, headers=dict(resp.headers))
except httpx.RequestError as e:
raise HTTPException(status_code=400, detail=f"Request to the target service failed: {str(e)}")
@gradio_app.post("/vits")
async def forward_post_request(request: Request):
return await forward_request(request, "POST")
# --- --- app_lifespan --- ---
from contextlib import asynccontextmanager
@asynccontextmanager
@@ -154,13 +230,22 @@ def start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SS
fastapi_app = FastAPI(lifespan=app_lifespan)
fastapi_app.mount(CUSTOM_PATH, gradio_app)
# --- --- favicon --- ---
# --- --- favicon and block fastapi api reference routes --- ---
from starlette.responses import JSONResponse
if CUSTOM_PATH != '/':
from fastapi.responses import FileResponse
@fastapi_app.get("/favicon.ico")
async def favicon():
return FileResponse(app_block.favicon_path)
@fastapi_app.middleware("http")
async def middleware(request: Request, call_next):
if request.scope['path'] in ["/docs", "/redoc", "/openapi.json"]:
return JSONResponse(status_code=404, content={"message": "Not Found"})
response = await call_next(request)
return response
# --- --- uvicorn.Config --- ---
ssl_keyfile = None if SSL_KEYFILE == "" else SSL_KEYFILE
ssl_certfile = None if SSL_CERTFILE == "" else SSL_CERTFILE
@@ -208,4 +293,4 @@ def start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SS
}
requests.get(f"{app_block.local_url}startup-events", verify=app_block.ssl_verify, proxies=forbid_proxies)
app_block.is_running = True
app_block.block_thread()
app_block.block_thread()


@@ -104,6 +104,14 @@ def extract_archive(file_path, dest_dir):
elif file_extension in [".tar", ".gz", ".bz2"]:
with tarfile.open(file_path, "r:*") as tarobj:
# sanitize extraction paths and reject any unsafe entries
for member in tarobj.getmembers():
member_path = os.path.normpath(member.name)
full_path = os.path.join(dest_dir, member_path)
full_path = os.path.abspath(full_path)
if not full_path.startswith(os.path.abspath(dest_dir) + os.sep):
raise Exception(f"Attempted Path Traversal in {member.name}")
tarobj.extractall(path=dest_dir)
print("Successfully extracted tar archive to {}".format(dest_dir))


@@ -14,7 +14,7 @@ def is_openai_api_key(key):
if len(CUSTOM_API_KEY_PATTERN) != 0:
API_MATCH_ORIGINAL = re.match(CUSTOM_API_KEY_PATTERN, key)
else:
API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$|sess-[a-zA-Z0-9]{40}$", key)
API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$|sk-proj-[a-zA-Z0-9]{48}$|sess-[a-zA-Z0-9]{40}$", key)
return bool(API_MATCH_ORIGINAL)
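A quick sanity check of the broadened pattern; the keys below are fake placeholders with the right shape:

```python
import re

pattern = r"sk-[a-zA-Z0-9]{48}$|sk-proj-[a-zA-Z0-9]{48}$|sess-[a-zA-Z0-9]{40}$"
print(bool(re.match(pattern, "sk-" + "a" * 48)))       # True
print(bool(re.match(pattern, "sk-proj-" + "a" * 48)))  # True (newly accepted)
print(bool(re.match(pattern, "sk-proj-" + "a" * 10)))  # False
```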


@@ -26,6 +26,8 @@ def apply_gpt_academic_string_mask(string, mode="show_all"):
When the string contains mask tags (<gpt_academic_string_mask><show_...>), process it according to the intended audience (the LLM, or web rendering) and return the processed string.
Diagram: https://mermaid.live/edit#pako:eNqlkUtLw0AUhf9KuOta0iaTplkIPlpduFJwoZEwJGNbzItpita2O6tF8QGKogXFtwu7cSHiq3-mk_oznFR8IYLgrGbuOd9hDrcCpmcR0GDW9ubNPKaBMDauuwI_A9M6YN-3y0bODwxsYos4BdMoBrTg5gwHF-d0mBH6-vqFQe58ed5m9XPW2uteX3Tubrj0ljLYcwxxR3h1zB43WeMs3G19yEM9uapDMe_NG9i2dagKw1Fee4c1D9nGEbtc-5n6HbNtJ8IyHOs8tbs7V2HrlDX2w2Y7XD_5haHEtQiNsOwfMVa_7TzsvrWIuJGo02qTrdwLk9gukQylHv3Afv1ML270s-HZUndrmW1tdA-WfvbM_jMFYuAQ6uCCxVdciTJ1CPLEITpo_GphypeouzXuw6XAmyi7JmgBLZEYlHwLB2S4gHMUO-9DH7tTnvf1CVoFFkBLSOk4QmlRTqpIlaWUHINyNFXjaQWpCYRURUKiWovBYo8X4ymEJFlECQUpqaQkJmuvWygPpg
"""
if not string:
return string
if "<gpt_academic_string_mask>" not in string: # No need to process
return string

tests/init_test.py (new file, 10 lines)

@@ -0,0 +1,10 @@
def validate_path():
import os, sys
os.path.dirname(__file__)
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
os.chdir(root_dir_assume)
sys.path.append(root_dir_assume)
validate_path() # validate path so you can run from base directory


@@ -0,0 +1,22 @@
"""
Test each plugin in the project. Usage: run python tests/test_plugins.py directly.
"""
import os, sys, importlib
def validate_path():
dir_name = os.path.dirname(__file__)
root_dir_assume = os.path.abspath(dir_name + "/..")
os.chdir(root_dir_assume)
sys.path.append(root_dir_assume)
validate_path() # validate path so the script can run from the base directory
if __name__ == "__main__":
plugin_test = importlib.import_module('test_utils').plugin_test
plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2203.01927")


@@ -14,12 +14,13 @@ validate_path() # validate path so you can run from base directory
if "在线模型":
if __name__ == "__main__":
from request_llms.bridge_cohere import predict_no_ui_long_connection
from request_llms.bridge_taichu import predict_no_ui_long_connection
# from request_llms.bridge_cohere import predict_no_ui_long_connection
# from request_llms.bridge_spark import predict_no_ui_long_connection
# from request_llms.bridge_zhipu import predict_no_ui_long_connection
# from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
llm_kwargs = {
"llm_model": "command-r-plus",
"llm_model": "taichu",
"max_length": 4096,
"top_p": 1,
"temperature": 1,


@@ -43,8 +43,10 @@ def validate_path():
validate_path() # validate path so you can run from base directory
from toolbox import markdown_convertion
html = markdown_convertion(md)
from shared_utils.advanced_markdown_format import markdown_convertion_for_file
with open("gpt_log/default_user/shared/2024-04-22-01-27-43.zip.extract/translated_markdown.md", "r", encoding="utf-8") as f:
md = f.read()
html = markdown_convertion_for_file(md)
# print(html)
with open("test.html", "w", encoding="utf-8") as f:
f.write(html)


@@ -2,30 +2,27 @@
Test each plugin in the project. Usage: run python tests/test_plugins.py directly.
"""
import init_test
import os, sys
def validate_path():
dir_name = os.path.dirname(__file__)
root_dir_assume = os.path.abspath(dir_name + "/..")
os.chdir(root_dir_assume)
sys.path.append(root_dir_assume)
validate_path() # validate path so the script can run from the base directory
if __name__ == "__main__":
from tests.test_utils import plugin_test
from test_utils import plugin_test
plugin_test(plugin='crazy_functions.SourceCode_Comment->注释Python项目', main_input="build/test/python_comment")
# plugin_test(plugin='crazy_functions.Internet_GPT->连接网络回答问题', main_input="谁是应急食品?")
# plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
# plugin_test(plugin='crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF', main_input="2307.07522")
# plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2307.07522")
plugin_test(
plugin="crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF",
main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix",
)
# plugin_test(plugin='crazy_functions.PDF_Translate->批量翻译PDF文档', main_input='build/pdf/t1.pdf')
# plugin_test(
# plugin="crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF",
# main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix",
# )
# plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')
@@ -35,15 +32,15 @@ if __name__ == "__main__":
# plugin_test(plugin='crazy_functions.命令行助手->命令行助手', main_input='查看当前的docker容器列表')
# plugin_test(plugin='crazy_functions.解析项目源代码->解析一个Python项目', main_input="crazy_functions/test_project/python/dqn")
# plugin_test(plugin='crazy_functions.SourceCode_Analyse->解析一个Python项目', main_input="crazy_functions/test_project/python/dqn")
# plugin_test(plugin='crazy_functions.解析项目源代码->解析一个C项目', main_input="crazy_functions/test_project/cpp/cppipc")
# plugin_test(plugin='crazy_functions.SourceCode_Analyse->解析一个C项目', main_input="crazy_functions/test_project/cpp/cppipc")
# plugin_test(plugin='crazy_functions.Latex全文润色->Latex英文润色', main_input="crazy_functions/test_project/latex/attention")
# plugin_test(plugin='crazy_functions.批量Markdown翻译->Markdown中译英', main_input="README.md")
# plugin_test(plugin='crazy_functions.Markdown_Translate->Markdown中译英', main_input="README.md")
# plugin_test(plugin='crazy_functions.批量翻译PDF文档_多线程->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
# plugin_test(plugin='crazy_functions.PDF_Translate->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
# plugin_test(plugin='crazy_functions.谷歌检索小助手->谷歌检索小助手', main_input="https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG=")
@@ -58,7 +55,7 @@ if __name__ == "__main__":
# plugin_test(plugin='crazy_functions.数学动画生成manim->动画生成', main_input="A ball split into 2, and then split into 4, and finally split into 8.")
# for lang in ["English", "French", "Japanese", "Korean", "Russian", "Italian", "German", "Portuguese", "Arabic"]:
# plugin_test(plugin='crazy_functions.批量Markdown翻译->Markdown翻译指定语言', main_input="README.md", advanced_arg={"advanced_arg": lang})
# plugin_test(plugin='crazy_functions.Markdown_Translate->Markdown翻译指定语言', main_input="README.md", advanced_arg={"advanced_arg": lang})
# plugin_test(plugin='crazy_functions.知识库文件注入->知识库文件注入', main_input="./")
@@ -66,7 +63,7 @@ if __name__ == "__main__":
# plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="远程云服务器部署?")
# plugin_test(plugin='crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF', main_input="2210.03629")
# plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2210.03629")
# advanced_arg = {"advanced_arg":"--llm_to_learn=gpt-3.5-turbo --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、人设进行描写。要求100字以内,用第二人称。' --system_prompt=''" }
# plugin_test(plugin='crazy_functions.chatglm微调工具->微调数据集生成', main_input='build/dev.json', advanced_arg=advanced_arg)


@@ -0,0 +1,342 @@
import init_test
from toolbox import CatchException, update_ui
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
import datetime
import re
from textwrap import dedent
# TODO: fix the indentation problem
find_function_end_prompt = '''
Below is a page of code that you need to read. This page may not be complete yet; your job is to split this page into separate functions, class methods, etc.
- Provide the line number where the first visible function ends.
- Provide the line number where the next visible function begins.
- If there are no other functions in this page, you should simply return the line number of the last line.
- Only focus on functions declared by `def` keyword. Ignore inline functions. Ignore function calls.
------------------ Example ------------------
INPUT:
```
L0000 |import sys
L0001 |import re
L0002 |
L0003 |def trimmed_format_exc():
L0004 | import os
L0005 | import traceback
L0006 | str = traceback.format_exc()
L0007 | current_path = os.getcwd()
L0008 | replace_path = "."
L0009 | return str.replace(current_path, replace_path)
L0010 |
L0011 |
L0012 |def trimmed_format_exc_markdown():
L0013 | ...
L0014 | ...
```
OUTPUT:
```
<first_function_end_at>L0009</first_function_end_at>
<next_function_begin_from>L0012</next_function_begin_from>
```
------------------ End of Example ------------------
------------------ the real INPUT you need to process NOW ------------------
```
{THE_TAGGED_CODE}
```
'''
revise_funtion_prompt = '''
You need to read the following code, and revise the code according to following instructions:
1. You should analyze the purpose of the functions (if there are any).
2. You need to add docstring for the provided functions (if there are any).
Be aware:
1. You must NOT modify the indent of code.
2. You are NOT authorized to change or translate non-comment code, and you are NOT authorized to add empty lines either.
3. Use English to add comments and docstrings. Do NOT translate Chinese that is already in the code.
------------------ Example ------------------
INPUT:
```
L0000 |
L0001 |def zip_result(folder):
L0002 | t = gen_time_str()
L0003 | zip_folder(folder, get_log_folder(), f"result.zip")
L0004 | return os.path.join(get_log_folder(), f"result.zip")
L0005 |
L0006 |
```
OUTPUT:
<instruction_1_purpose>
This function compresses a given folder, and return the path of the resulting `zip` file.
</instruction_1_purpose>
<instruction_2_revised_code>
```
def zip_result(folder):
"""
Compresses the specified folder into a zip file and stores it in the log folder.
Args:
folder (str): The path to the folder that needs to be compressed.
Returns:
str: The path to the created zip file in the log folder.
"""
t = gen_time_str()
zip_folder(folder, get_log_folder(), f"result.zip") # ⭐ Execute the zipping of folder
return os.path.join(get_log_folder(), f"result.zip")
```
</instruction_2_revised_code>
------------------ End of Example ------------------
------------------ the real INPUT you need to process NOW ------------------
```
{THE_CODE}
```
{INDENT_REMINDER}
'''
class ContextWindowManager():
def __init__(self, llm_kwargs) -> None:
self.full_context = []
self.full_context_with_line_no = []
self.current_page_start = 0
self.page_limit = 100 # 100 lines of code each page
self.ignore_limit = 20
self.llm_kwargs = llm_kwargs
def generate_tagged_code_from_full_context(self):
for i, code in enumerate(self.full_context):
number = i
padded_number = f"{number:04}"
result = f"L{padded_number}"
self.full_context_with_line_no.append(f"{result} | {code}")
return self.full_context_with_line_no
def read_file(self, path):
with open(path, 'r', encoding='utf8') as f:
self.full_context = f.readlines()
self.full_context_with_line_no = self.generate_tagged_code_from_full_context()
def find_next_function_begin(self, tagged_code:list, begin_and_end):
begin, end = begin_and_end
THE_TAGGED_CODE = ''.join(tagged_code)
self.llm_kwargs['temperature'] = 0
result = predict_no_ui_long_connection(
inputs=find_function_end_prompt.format(THE_TAGGED_CODE=THE_TAGGED_CODE),
llm_kwargs=self.llm_kwargs,
history=[],
sys_prompt="",
observe_window=[],
console_slience=True
)
def extract_number(text):
# match the pattern with a regular expression
match = re.search(r'<next_function_begin_from>L(\d+)</next_function_begin_from>', text)
if match:
# extract the matched number and convert it to an integer
return int(match.group(1))
return None
line_no = extract_number(result)
if line_no is not None:
return line_no
else:
raise RuntimeError
return end
def _get_next_window(self):
#
current_page_start = self.current_page_start
if self.current_page_start == len(self.full_context) + 1:
raise StopIteration
# if only a few lines remain, handle them all in one go
if len(self.full_context) - self.current_page_start < self.ignore_limit:
future_page_start = len(self.full_context) + 1
self.current_page_start = future_page_start
return current_page_start, future_page_start
tagged_code = self.full_context_with_line_no[ self.current_page_start: self.current_page_start + self.page_limit]
line_no = self.find_next_function_begin(tagged_code, [self.current_page_start, self.current_page_start + self.page_limit])
if line_no > len(self.full_context) - 5:
line_no = len(self.full_context) + 1
future_page_start = line_no
self.current_page_start = future_page_start
# ! consider eof
return current_page_start, future_page_start
def dedent(self, text):
"""Remove any common leading whitespace from every line in `text`.
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Find the largest common whitespace between current line and previous
# winner.
else:
for i, (x, y) in enumerate(zip(margin, indent)):
if x != y:
margin = margin[:i]
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text, len(margin)
def get_next_batch(self):
current_page_start, future_page_start = self._get_next_window()
return self.full_context[current_page_start: future_page_start], current_page_start, future_page_start
def tag_code(self, fn):
code = ''.join(fn)
_, n_indent = self.dedent(code)
indent_reminder = "" if n_indent == 0 else f"(Reminder: as you can see, this piece of code is indented with {n_indent} whitespace characters; please preserve them in the OUTPUT.)"
self.llm_kwargs['temperature'] = 0
result = predict_no_ui_long_connection(
inputs=revise_funtion_prompt.format(THE_CODE=code, INDENT_REMINDER=indent_reminder),
llm_kwargs=self.llm_kwargs,
history=[],
sys_prompt="",
observe_window=[],
console_slience=True
)
def get_code_block(reply):
import re
pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
matches = re.findall(pattern, reply) # find all code blocks in text
if len(matches) == 1:
return matches[0][len('python'):] if matches[0].startswith('python') else matches[0] # code block, language tag removed
return None
code_block = get_code_block(result)
if code_block is not None:
code_block = self.sync_and_patch(original=code, revised=code_block)
return code_block
else:
return code
def sync_and_patch(self, original, revised):
"""Ensure the number of pre-string empty lines in revised matches those in original."""
def count_leading_empty_lines(s, reverse=False):
"""Count the number of leading empty lines in a string."""
lines = s.split('\n')
if reverse: lines = list(reversed(lines))
count = 0
for line in lines:
if line.strip() == '':
count += 1
else:
break
return count
original_empty_lines = count_leading_empty_lines(original)
revised_empty_lines = count_leading_empty_lines(revised)
if original_empty_lines > revised_empty_lines:
additional_lines = '\n' * (original_empty_lines - revised_empty_lines)
revised = additional_lines + revised
elif original_empty_lines < revised_empty_lines:
lines = revised.split('\n')
revised = '\n'.join(lines[revised_empty_lines - original_empty_lines:])
original_empty_lines = count_leading_empty_lines(original, reverse=True)
revised_empty_lines = count_leading_empty_lines(revised, reverse=True)
if original_empty_lines > revised_empty_lines:
additional_lines = '\n' * (original_empty_lines - revised_empty_lines)
revised = revised + additional_lines
elif original_empty_lines < revised_empty_lines:
lines = revised.split('\n')
revised = '\n'.join(lines[:-(revised_empty_lines - original_empty_lines)])
return revised
from toolbox import get_plugin_default_kwargs
llm_kwargs = get_plugin_default_kwargs()["llm_kwargs"]
cwm = ContextWindowManager(llm_kwargs)
cwm.read_file(path="./test.py")
output_buf = ""
with open('temp.py', 'w+', encoding='utf8') as f:
while True:
try:
next_batch, line_no_start, line_no_end = cwm.get_next_batch()
result = cwm.tag_code(next_batch)
f.write(result)
output_buf += result
except StopIteration:
next_batch, line_no_start, line_no_end = [], -1, -1
break
print('-------------------------------------------')
print(''.join(next_batch))
print('-------------------------------------------')
print(cwm)

tests/test_safe_pickle.py (new file, 17 lines)

@@ -0,0 +1,17 @@
def validate_path():
import os, sys
os.path.dirname(__file__)
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
os.chdir(root_dir_assume)
sys.path.append(root_dir_assume)
validate_path() # validate path so you can run from base directory
from crazy_functions.latex_fns.latex_pickle_io import objdump, objload
from crazy_functions.latex_fns.latex_actions import LatexPaperFileGroup, LatexPaperSplit
pfg = LatexPaperFileGroup()
pfg.get_token_num = None
pfg.target = "target_elem"
x = objdump(pfg)
t = objload()
print(t.target)


@@ -0,0 +1,102 @@
def validate_path():
import os, sys
os.path.dirname(__file__)
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
os.chdir(root_dir_assume)
sys.path.append(root_dir_assume)
validate_path() # validate path so you can run from base directory
def write_chat_to_file(chatbot, history=None, file_name=None):
"""
Write the chat history to a file in Markdown format. If no file name is given, generate one from the current time.
"""
import os
import time
from themes.theme import advanced_css
# debug
import pickle
# def objdump(obj, file="objdump.tmp"):
# with open(file, "wb+") as f:
# pickle.dump(obj, f)
# return
def objload(file="objdump.tmp"):
import os
if not os.path.exists(file):
return
with open(file, "rb") as f:
return pickle.load(f)
# objdump((chatbot, history))
chatbot, history = objload()
with open("test.html", 'w', encoding='utf8') as f:
from textwrap import dedent
form = dedent("""
<!DOCTYPE html><head><meta charset="utf-8"><title>对话存档</title><style>{CSS}</style></head>
<body>
<div class="test_temp1" style="width:10%; height: 500px; float:left;"></div>
<div class="test_temp2" style="width:80%;padding: 40px;float:left;padding-left: 20px;padding-right: 20px;box-shadow: rgba(0, 0, 0, 0.2) 0px 0px 8px 8px;border-radius: 10px;">
<div class="chat-body" style="display: flex;justify-content: center;flex-direction: column;align-items: center;flex-wrap: nowrap;">
{CHAT_PREVIEW}
<div></div>
<div></div>
<div style="text-align: center;width:80%;padding: 0px;float:left;padding-left:20px;padding-right:20px;box-shadow: rgba(0, 0, 0, 0.05) 0px 0px 1px 2px;border-radius: 1px;">对话(原始数据)</div>
{HISTORY_PREVIEW}
</div>
</div>
<div class="test_temp3" style="width:10%; height: 500px; float:left;"></div>
</body>
""")
qa_from = dedent("""
<div class="QaBox" style="width:80%;padding: 20px;margin-bottom: 20px;box-shadow: rgb(0 255 159 / 50%) 0px 0px 1px 2px;border-radius: 4px;">
<div class="Question" style="border-radius: 2px;">{QUESTION}</div>
<hr color="blue" style="border-top: dotted 2px #ccc;">
<div class="Answer" style="border-radius: 2px;">{ANSWER}</div>
</div>
""")
history_from = dedent("""
<div class="historyBox" style="width:80%;padding: 0px;float:left;padding-left:20px;padding-right:20px;box-shadow: rgba(0, 0, 0, 0.05) 0px 0px 1px 2px;border-radius: 1px;">
<div class="entry" style="border-radius: 2px;">{ENTRY}</div>
</div>
""")
CHAT_PREVIEW_BUF = ""
for i, contents in enumerate(chatbot):
question, answer = contents[0], contents[1]
if question is None: question = ""
try: question = str(question)
except: question = ""
if answer is None: answer = ""
try: answer = str(answer)
except: answer = ""
CHAT_PREVIEW_BUF += qa_from.format(QUESTION=question, ANSWER=answer)
HISTORY_PREVIEW_BUF = ""
for h in history:
HISTORY_PREVIEW_BUF += history_from.format(ENTRY=h)
html_content = form.format(CHAT_PREVIEW=CHAT_PREVIEW_BUF, HISTORY_PREVIEW=HISTORY_PREVIEW_BUF, CSS=advanced_css)
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_content, 'lxml')
# extract QaBox entries
qa_box_list = []
qa_boxes = soup.find_all("div", class_="QaBox")
for box in qa_boxes:
question = box.find("div", class_="Question").get_text(strip=False)
answer = box.find("div", class_="Answer").get_text(strip=False)
qa_box_list.append({"Question": question, "Answer": answer})
# extract historyBox entries
history_box_list = []
history_boxes = soup.find_all("div", class_="historyBox")
for box in history_boxes:
entry = box.find("div", class_="entry").get_text(strip=False)
history_box_list.append(entry)
print('')
write_chat_to_file(None, None, None)

tests/test_searxng.py (new file, 58 lines)

@@ -0,0 +1,58 @@
def validate_path():
import os, sys
os.path.dirname(__file__)
root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
os.chdir(root_dir_assume)
sys.path.append(root_dir_assume)
validate_path() # validate path so you can run from base directory
from toolbox import get_conf
import requests
def searxng_request(query, proxies, categories='general', searxng_url=None, engines=None):
url = 'http://localhost:50001/'
engine = engines if engines is not None else 'bing,' # default to bing when no engines are specified
if categories == 'general':
params = {
'q': query, # search query
'format': 'json', # output format: JSON
'language': 'zh', # search language
'engines': engine,
}
elif categories == 'science':
params = {
'q': query, # search query
'format': 'json', # output format: JSON
'language': 'zh', # search language
'categories': 'science'
}
else:
raise ValueError('不支持的检索类型')
headers = {
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'X-Forwarded-For': '112.112.112.112',
'X-Real-IP': '112.112.112.112'
}
results = []
response = requests.post(url, params=params, headers=headers, proxies=proxies, timeout=30)
if response.status_code == 200:
json_result = response.json()
for result in json_result['results']:
item = {
"title": result.get("title", ""),
"content": result.get("content", ""),
"link": result["url"],
}
print(result['engines'])
results.append(item)
return results
else:
if response.status_code == 429:
raise ValueError("Searxng在线搜索服务当前使用人数太多,请稍后。")
else:
raise ValueError("在线搜索失败,状态码: " + str(response.status_code) + '\t' + response.content.decode('utf-8'))
res = searxng_request("vr environment", None, categories='science', searxng_url=None, engines=None)
print(res)


@@ -1,3 +1,9 @@
#plugin_arg_menu {
transform: translate(-50%, -50%);
border: dashed;
}
/* hide remove all button */
.remove-all.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e {
visibility: hidden;
@@ -38,6 +44,7 @@
left: calc(100% + 3px);
top: 0;
display: flex;
flex-direction: column;
justify-content: space-between;
}
/* .message-btn-row-leading, .message-btn-row-trailing {
@@ -108,6 +115,7 @@
border-width: thin;
user-select: none;
padding-left: 2%;
text-align: center;
}
.floating-component #input-panel2 {
@@ -117,3 +125,149 @@
border-width: thin;
border-top-width: 0;
}
.floating-component #plugin_arg_panel {
border-top-left-radius: 0px;
border-top-right-radius: 0px;
border: solid;
border-width: thin;
border-top-width: 0;
}
.floating-component #edit-panel {
border-top-left-radius: 0px;
border-top-right-radius: 0px;
border: solid;
border-width: thin;
border-top-width: 0;
}
.welcome-card-container {
text-align: center;
margin: 0 auto;
display: flex;
position: absolute;
width: inherit;
padding: 50px;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
flex-wrap: wrap;
justify-content: center;
transition: opacity 1s ease-in-out;
opacity: 0;
}
.welcome-card-container.show {
opacity: 1;
}
.welcome-card-container.hide {
opacity: 0;
}
.welcome-card {
border-radius: 10px;
box-shadow: 0px 0px 6px 3px #e5e7eb6b;
padding: 15px;
margin: 10px;
flex: 1 0 calc(30% - 5px);
transform: rotateY(0deg);
transition: transform 0.1s;
transform-style: preserve-3d;
}
.welcome-card.show {
transform: rotateY(0deg);
}
.welcome-card.hide {
transform: rotateY(90deg);
}
.welcome-title {
font-size: 40px;
padding: 20px;
margin: 10px;
flex: 0 0 calc(90%);
}
.welcome-card-title {
font-size: 20px;
margin: 2px;
flex: 0 0 calc(95%);
padding-bottom: 8px;
padding-top: 8px;
padding-right: 8px;
padding-left: 8px;
display: flex;
justify-content: center;
}
.welcome-svg {
padding-right: 10px;
}
.welcome-title-text {
text-wrap: nowrap;
}
.welcome-content {
text-wrap: balance;
height: 55px;
display: flex;
align-items: center;
}
#gpt-submit-row {
display: flex;
gap: 0 !important;
border-radius: var(--button-large-radius);
border: var(--button-border-width) solid var(--button-primary-border-color);
/* background: var(--button-primary-background-fill); */
background: var(--button-primary-background-fill-hover);
color: var(--button-primary-text-color);
box-shadow: var(--button-shadow);
transition: var(--button-transition);
display: flex;
}
#gpt-submit-row:hover {
border-color: var(--button-primary-border-color-hover);
/* background: var(--button-primary-background-fill-hover); */
/* color: var(--button-primary-text-color-hover); */
}
#gpt-submit-row button#elem_submit_visible {
border-top-right-radius: 0px;
border-bottom-right-radius: 0px;
box-shadow: none !important;
flex-grow: 1;
}
#gpt-submit-row #gpt-submit-dropdown {
border-top-left-radius: 0px;
border-bottom-left-radius: 0px;
border-left: 0.5px solid #FFFFFF88 !important;
display: flex;
overflow: unset !important;
max-width: 40px !important;
min-width: 40px !important;
}
#gpt-submit-row #gpt-submit-dropdown input {
pointer-events: none;
opacity: 0; /* hide the input box */
width: 0;
margin-inline: 0;
cursor: pointer;
}
#gpt-submit-row #gpt-submit-dropdown label {
display: flex;
width: 0;
}
#gpt-submit-row #gpt-submit-dropdown label div.wrap {
background: none;
box-shadow: none;
border: none;
}
#gpt-submit-row #gpt-submit-dropdown label div.wrap div.wrap-inner {
background: none;
padding-inline: 0;
height: 100%;
}
#gpt-submit-row #gpt-submit-dropdown svg.dropdown-arrow {
transform: scale(2) translate(4.5px, -0.3px);
}
#gpt-submit-row #gpt-submit-dropdown > *:hover {
cursor: context-menu;
}


@@ -1,3 +1,6 @@
// flags
enable_tts = false;
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 1: utility functions
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
@@ -7,6 +10,9 @@ function push_data_to_gradio_component(DAT, ELEM_ID, TYPE) {
if (TYPE == "str") {
// convert dat to string: do nothing
}
else if (TYPE == "obj") {
// convert dat to string: do nothing
}
else if (TYPE == "no_conversion") {
// do nothing
}
@@ -254,11 +260,22 @@ function cancel_loading_status() {
// Part 2: copy button
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
function addCopyButton(botElement) {
var allow_auto_read_continously = true;
var allow_auto_read_tts_flag = false;
function addCopyButton(botElement, index, is_last_in_arr) {
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
// Copy bot button
const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
// const audioIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
const audioIcon = '<span><svg t="1713628577799" fill="currentColor" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="4587" width=".9em" height=".9em"><path d="M113.7664 540.4672c0-219.9552 178.2784-398.2336 398.2336-398.2336S910.2336 320.512 910.2336 540.4672v284.4672c0 31.4368-25.4976 56.9344-56.9344 56.9344h-56.9344c-31.4368 0-56.9344-25.4976-56.9344-56.9344V597.2992c0-31.4368 25.4976-56.9344 56.9344-56.9344h56.9344c0-188.5184-152.7808-341.2992-341.2992-341.2992S170.7008 351.9488 170.7008 540.4672h56.9344c31.4368 0 56.9344 25.4976 56.9344 56.9344v227.5328c0 31.4368-25.4976 56.9344-56.9344 56.9344h-56.9344c-31.4368 0-56.9344-25.4976-56.9344-56.9344V540.4672z" p-id="4588"></path></svg></span>';
// const cancelAudioIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
// this feature is not ready yet
if (allow_auto_read_continously && is_last_in_arr && allow_auto_read_tts_flag) {
process_latest_text_output(botElement.innerText, index);
}
const messageBtnColumnElement = botElement.querySelector('.message-btn-row');
if (messageBtnColumnElement) {
@@ -273,6 +290,7 @@ function addCopyButton(botElement) {
copyButton.addEventListener('click', async () => {
const textToCopy = botElement.innerText;
try {
// push_text_to_audio(textToCopy).catch(console.error);
if ("clipboard" in navigator) {
await navigator.clipboard.writeText(textToCopy);
copyButton.innerHTML = copiedIcon;
@@ -299,9 +317,38 @@ function addCopyButton(botElement) {
console.error("Copy failed: ", error);
}
});
if (enable_tts){
var audioButton = document.createElement('button');
audioButton.classList.add('audio-toggle-btn');
audioButton.innerHTML = audioIcon;
audioButton.addEventListener('click', async () => {
if (audioPlayer.isPlaying) {
allow_auto_read_tts_flag = false;
toast_push('自动朗读已禁用。', 3000);
audioPlayer.stop();
setCookie("js_auto_read_cookie", "False", 365);
} else {
allow_auto_read_tts_flag = true;
toast_push('正在合成语音 & 自动朗读已开启 (再次点击此按钮可禁用自动朗读)。', 3000);
// toast_push('正在合成语音', 3000);
const readText = botElement.innerText;
prev_chatbot_index = index;
prev_text = readText;
prev_text_already_pushed = readText;
push_text_to_audio(readText);
setCookie("js_auto_read_cookie", "True", 365);
}
});
}
var messageBtnColumn = document.createElement('div');
messageBtnColumn.classList.add('message-btn-row');
messageBtnColumn.appendChild(copyButton);
if (enable_tts){
messageBtnColumn.appendChild(audioButton);
}
botElement.appendChild(messageBtnColumn);
}
@@ -337,7 +384,15 @@ function chatbotContentChanged(attempt = 1, force = false) {
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
for (var i = 0; i < attempt; i++) {
setTimeout(() => {
gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
const messages = gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot');
messages.forEach((message, index, arr) => {
// Check if the current message is the last in the array
const is_last_in_arr = index === arr.length - 1;
// Now pass both the message element and the is_last_in_arr boolean to addCopyButton
addCopyButton(message, index, is_last_in_arr);
});
// gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
}, i === 0 ? 0 : 200);
}
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
@@ -621,16 +676,16 @@ function monitoring_input_box() {
if (elem_input_main) {
if (elem_input_main.querySelector("textarea")) {
register_func_paste(elem_input_main.querySelector("textarea"));
}
}
if (elem_input_float) {
if (elem_input_float.querySelector("textarea")) {
register_func_paste(elem_input_float.querySelector("textarea"));
}
}
if (elem_chatbot) {
register_func_drag(elem_chatbot);
}
}
@@ -737,10 +792,30 @@ function minor_ui_adjustment() {
}
setInterval(function () {
auto_hide_toolbar();
}, 200); // run every 200 ms
}
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Tweaks to the submit button's dropdown menu
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
function ButtonWithDropdown_init() {
let submitButton = document.querySelector('button#elem_submit_visible');
let submitDropdown = document.querySelector('#gpt-submit-dropdown');
function updateDropdownWidth() {
if (submitButton && submitDropdown) {
let setWidth = submitButton.clientWidth + submitDropdown.clientWidth;
let setLeft = -1 * submitButton.clientWidth;
document.getElementById('submit-dropdown-style')?.remove();
const styleElement = document.createElement('style');
styleElement.id = 'submit-dropdown-style';
styleElement.innerHTML = `#gpt-submit-dropdown ul.options { width: ${setWidth}px; left: ${setLeft}px; }`;
document.head.appendChild(styleElement);
}
}
window.addEventListener('resize', updateDropdownWidth);
updateDropdownWidth();
}
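// ButtonWithDropdown_init keeps the dropdown's option list aligned with the
// combined width of the visible submit button and its caret, by re-injecting
// a <style> rule on every resize. A minimal usage sketch (assuming the two
// elements above exist once Gradio has rendered):
// document.addEventListener('DOMContentLoaded', () => {
//     setTimeout(ButtonWithDropdown_init, 0); // defer one tick for Gradio rendering
// });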
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 6: prevent unwanted scrolling
@@ -779,39 +854,44 @@ function limit_scroll_position() {
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
function loadLive2D() {
    if (document.querySelector(".waifu")) {
        $('.waifu').show();
    } else {
        try {
            $("<link>").attr({ href: "file=themes/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css" }).appendTo('head');
            $('body').append('<div class="waifu"><div class="waifu-tips"></div><canvas id="live2d" class="live2d"></canvas><div class="waifu-tool"><span class="fui-home"></span> <span class="fui-chat"></span> <span class="fui-eye"></span> <span class="fui-user"></span> <span class="fui-photo"></span> <span class="fui-info-circle"></span> <span class="fui-cross"></span></div></div>');
            $.ajax({
                url: "file=themes/waifu_plugin/waifu-tips.js", dataType: "script", cache: true, success: function () {
                    $.ajax({
                        url: "file=themes/waifu_plugin/live2d.js", dataType: "script", cache: true, success: function () {
                            /* these settings can be modified directly */
                            live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // Hitokoto ("one sentence") quote API
                            live2d_settings['modelId'] = 3; // default model ID
                            live2d_settings['modelTexturesId'] = 44; // default texture ID
                            live2d_settings['modelStorage'] = false; // do not persist the model ID
                            live2d_settings['waifuSize'] = '210x187';
                            live2d_settings['waifuTipsSize'] = '187x52';
                            live2d_settings['canSwitchModel'] = true;
                            live2d_settings['canSwitchTextures'] = true;
                            live2d_settings['canSwitchHitokoto'] = false;
                            live2d_settings['canTakeScreenshot'] = false;
                            live2d_settings['canTurnToHomePage'] = false;
                            live2d_settings['canTurnToAboutPage'] = false;
                            live2d_settings['showHitokoto'] = false; // show Hitokoto quotes
                            live2d_settings['showF12Status'] = false; // show loading status
                            live2d_settings['showF12Message'] = false; // show mascot messages
                            live2d_settings['showF12OpenMsg'] = false; // show a hint when the devtools console is opened
                            live2d_settings['showCopyMessage'] = false; // show a "content copied" hint
                            live2d_settings['showWelcomeMessage'] = true; // show a welcome message on page entry
                            /* add settings above, before initModel */
                            initModel("file=themes/waifu_plugin/waifu-tips.json");
                        }
                    });
                }
            });
        } catch (err) { console.log("[Error] jQuery is not defined.") }
    }
}
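// The nested $.ajax calls above implement sequential script loading:
// waifu-tips.js first, then live2d.js, then initModel. A dependency-free
// equivalent, for reference (illustrative sketch only):
// function loadScript(src) {
//     return new Promise((resolve, reject) => {
//         const s = document.createElement('script');
//         s.src = src; s.onload = resolve; s.onerror = reject;
//         document.head.appendChild(s);
//     });
// }
// loadScript("file=themes/waifu_plugin/waifu-tips.js")
//     .then(() => loadScript("file=themes/waifu_plugin/live2d.js"))
//     .then(() => initModel("file=themes/waifu_plugin/waifu-tips.json"));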
@@ -858,99 +938,432 @@ function gpt_academic_gradio_saveload(
}
function reset_conversation(a, b) {
    // console.log("js_code_reset");
    a = btoa(unescape(encodeURIComponent(JSON.stringify(a))));
    setCookie("js_previous_chat_cookie", a, 1);
    gen_restore_btn();
    return [[], [], "已重置"];
}
// clear -> cache history into the cookie -> user clicks restore -> restore_previous_chat() -> trigger elem_update_history -> read history_cache
function restore_previous_chat() {
    console.log("restore_previous_chat");
    let chat = getCookie("js_previous_chat_cookie");
    chat = JSON.parse(decodeURIComponent(escape(atob(chat))));
    push_data_to_gradio_component(chat, "gpt-chatbot", "obj");
    document.querySelector("#elem_update_history").click(); // in order to call set_history_gr_state, and send history state to server
}
function gen_restore_btn() {
    // create the button element
    const button = document.createElement('div');
    // const recvIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
    const rec_svg = '<svg t="1714361184567" style="transform:translate(1px, 2.5px)" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="4389" width="35" height="35"><path d="M320 512h384v64H320zM320 384h384v64H320zM320 640h192v64H320z" p-id="4390" fill="#ffffff"></path><path d="M863.7 544c-1.9 44-11.4 86.8-28.5 127.2-18.5 43.8-45.1 83.2-78.9 117-33.8 33.8-73.2 60.4-117 78.9C593.9 886.3 545.7 896 496 896s-97.9-9.7-143.2-28.9c-43.8-18.5-83.2-45.1-117-78.9-33.8-33.8-60.4-73.2-78.9-117C137.7 625.9 128 577.7 128 528s9.7-97.9 28.9-143.2c18.5-43.8 45.1-83.2 78.9-117s73.2-60.4 117-78.9C398.1 169.7 446.3 160 496 160s97.9 9.7 143.2 28.9c23.5 9.9 45.8 22.2 66.5 36.7l-119.7 20 9.9 59.4 161.6-27 59.4-9.9-9.9-59.4-27-161.5-59.4 9.9 19 114.2C670.3 123.8 586.4 96 496 96 257.4 96 64 289.4 64 528s193.4 432 432 432c233.2 0 423.3-184.8 431.7-416h-64z" p-id="4391" fill="#ffffff"></path></svg>';
    const recvIcon = '<span>' + rec_svg + '</span>';
    // set the button's style and attributes
    button.id = 'floatingButton';
    button.className = 'glow';
    button.style.textAlign = 'center';
    button.style.position = 'fixed';
    button.style.bottom = '10px';
    button.style.left = '10px';
    button.style.width = '50px';
    button.style.height = '50px';
    button.style.borderRadius = '50%';
    button.style.backgroundColor = '#007bff';
    button.style.color = 'white';
    button.style.display = 'flex';
    button.style.alignItems = 'center';
    button.style.justifyContent = 'center';
    button.style.cursor = 'pointer';
    button.style.transition = 'all 0.3s ease';
    button.style.boxShadow = '0 0 10px rgba(0,0,0,0.2)';
    button.innerHTML = recvIcon;
    // add the keyframes for the glow animation
    const styleSheet = document.createElement('style');
    styleSheet.id = 'floatingButtonStyle';
    styleSheet.innerText = `
    @keyframes glow {
        from {
            box-shadow: 0 0 10px rgba(0,0,0,0.2);
        }
        to {
            box-shadow: 0 0 13px rgba(0,0,0,0.5);
        }
    }
    #floatingButton.glow {
        animation: glow 1s infinite alternate;
    }
    #floatingButton:hover {
        transform: scale(1.2);
        box-shadow: 0 0 20px rgba(0,0,0,0.4);
    }
    #floatingButton.disappearing {
        animation: shrinkAndDisappear 0.5s forwards;
    }
    `;
    // only add when not already present (the guard must test the id assigned above)
    if (!document.getElementById('floatingButtonStyle')) {
        document.head.appendChild(styleSheet);
    }
    // mouseover / mouseout event listeners
    button.addEventListener('mouseover', function () {
        this.textContent = "还原\n对话";
    });
    button.addEventListener('mouseout', function () {
        this.innerHTML = recvIcon;
    });
    // click event listener
    button.addEventListener('click', function () {
        restore_previous_chat();
        // add a class to trigger the shrink-and-disappear animation
        this.classList.add('disappearing');
        // then remove the button from the page
        document.body.removeChild(this);
    });
    // add the button to the page, only when not already present
    if (!document.getElementById('floatingButton')) {
        document.body.appendChild(button);
    }
}
async function on_plugin_exe_complete(fn_name) {
    console.log(fn_name);
    if (fn_name === "保存当前的对话") {
        // get chat profile path
        let chatbot = await get_data_from_gradio_component('gpt-chatbot');
        let may_have_chat_profile_info = chatbot[chatbot.length - 1][1];
        let href = get_href(may_have_chat_profile_info);
        if (href) {
            const cleanedHref = href.replace('file=', ''); // /home/fuqingxu/chatgpt_academic/gpt_log/default_user/chat_history/GPT-Academic对话存档2024-04-12-00-35-06.html
            console.log(cleanedHref);
        }
    }
}
function get_href(htmlString) {
    const parser = new DOMParser();
    const doc = parser.parseFromString(htmlString, 'text/html');
    const anchor = doc.querySelector('a');
    if (anchor) {
        return anchor.getAttribute('href');
    } else {
        return null;
    }
}
async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout) {
    // Part 1: layout initialization
    audio_fn_init();
    minor_ui_adjustment();
    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
    var chatbotObserver = new MutationObserver(() => {
        chatbotContentChanged(1);
    });
    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
    if (layout === "LEFT-RIGHT") { chatbotAutoHeight(); }
    if (layout === "LEFT-RIGHT") { limit_scroll_position(); }
    // Part 2: read cookies and initialize the UI
    let searchString = "";
    let bool_value = "";
    // darkmode
    if (getCookie("js_darkmode_cookie")) {
        dark = getCookie("js_darkmode_cookie");
    }
    dark = dark == "True";
    if (document.querySelectorAll('.dark').length) {
        if (!dark) {
            document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
        }
    } else {
        if (dark) {
            document.querySelector('body').classList.add('dark');
        }
    }
    // SysPrompt: the silent system prompt
    gpt_academic_gradio_saveload("load", "elem_prompt", "js_system_prompt_cookie", null, "str");
    // Temperature: the LLM temperature parameter
    gpt_academic_gradio_saveload("load", "elem_temperature", "js_temperature_cookie", null, "float");
    // clearButton: whether the input-clear buttons are shown
    if (getCookie("js_clearbtn_show_cookie")) {
        // have cookie
        bool_value = getCookie("js_clearbtn_show_cookie");
        bool_value = bool_value == "True";
        searchString = "输入清除键";
        if (bool_value) {
            // make btns appear
            let clearButton = document.getElementById("elem_clear"); clearButton.style.display = "block";
            let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "block";
            // deal with checkboxes
            let arr_with_clear_btn = update_array(
                await get_data_from_gradio_component('cbs'), "输入清除键", "add"
            );
            push_data_to_gradio_component(arr_with_clear_btn, "cbs", "no_conversion");
        } else {
            // make btns disappear
            let clearButton = document.getElementById("elem_clear"); clearButton.style.display = "none";
            let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "none";
            // deal with checkboxes
            let arr_without_clear_btn = update_array(
                await get_data_from_gradio_component('cbs'), "输入清除键", "remove"
            );
            push_data_to_gradio_component(arr_without_clear_btn, "cbs", "no_conversion");
        }
    }
    // live2d: whether to show the Live2D mascot
    if (getCookie("js_live2d_show_cookie")) {
        // have cookie
        searchString = "添加Live2D形象";
        bool_value = getCookie("js_live2d_show_cookie");
        bool_value = bool_value == "True";
        if (bool_value) {
            loadLive2D();
            let arr_with_live2d = update_array(
                await get_data_from_gradio_component('cbsc'), "添加Live2D形象", "add"
            );
            push_data_to_gradio_component(arr_with_live2d, "cbsc", "no_conversion");
        } else {
            try {
                $('.waifu').hide();
                let arr_without_live2d = update_array(
                    await get_data_from_gradio_component('cbsc'), "添加Live2D形象", "remove"
                );
                push_data_to_gradio_component(arr_without_live2d, "cbsc", "no_conversion");
            } catch (error) {
            }
        }
    } else {
        // do not have cookie
        if (live2d) {
            loadLive2D();
        }
    }
}
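// reset_conversation / restore_previous_chat round-trip the chat history
// through a cookie using a UTF-8-safe base64 encoding. The encoding itself,
// isolated for reference (the history value is illustrative):
// const history = [["你好", "Hello!"]];
// const encoded = btoa(unescape(encodeURIComponent(JSON.stringify(history))));
// const decoded = JSON.parse(decodeURIComponent(escape(atob(encoded))));
// console.assert(JSON.stringify(decoded) === JSON.stringify(history));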
async function generate_menu(guiBase64String, btnName){
// assign the button and menu data
push_data_to_gradio_component(guiBase64String, "invisible_current_pop_up_plugin_arg", "string");
push_data_to_gradio_component(btnName, "invisible_callback_btn_for_plugin_exe", "string");
// Base64 to dict
const stringData = atob(guiBase64String);
let guiJsonData = JSON.parse(stringData);
let menu = document.getElementById("plugin_arg_menu");
gui_args = {};
for (const key in guiJsonData) {
if (guiJsonData.hasOwnProperty(key)) {
const innerJSONString = guiJsonData[key];
const decodedObject = JSON.parse(innerJSONString);
gui_args[key] = decodedObject;
}
}
// make the argument menu visible
push_data_to_gradio_component({
visible: true,
__type__: 'update'
}, "plugin_arg_menu", "obj");
hide_all_elem();
// based on gui_args, reveal the matching argument fields
let text_cnt = 0;
let dropdown_cnt = 0;
// PLUGIN_ARG_MENU
for (const key in gui_args) {
if (gui_args.hasOwnProperty(key)) {
///////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////// Textbox ////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
if (gui_args[key].type=='string'){ // PLUGIN_ARG_MENU
const component_name = "plugin_arg_txt_" + text_cnt;
push_data_to_gradio_component({
visible: true,
label: gui_args[key].title + "(" + gui_args[key].description + ")",
// label: gui_args[key].title,
placeholder: gui_args[key].description,
__type__: 'update'
}, component_name, "obj");
if (key === "main_input"){
// for compatibility with older plugins, auto-load the input box value when building the menu
let current_main_input = await get_data_from_gradio_component('user_input_main');
let current_main_input_2 = await get_data_from_gradio_component('user_input_float');
push_data_to_gradio_component(current_main_input + current_main_input_2, component_name, "obj");
}
else if (key === "advanced_arg"){
// for compatibility with older plugins, auto-load the legacy advanced-args input value when building the menu
let advance_arg_input_legacy = await get_data_from_gradio_component('advance_arg_input_legacy');
push_data_to_gradio_component(advance_arg_input_legacy, component_name, "obj");
}
else {
push_data_to_gradio_component(gui_args[key].default_value, component_name, "obj");
}
document.getElementById(component_name).parentNode.parentNode.style.display = '';
text_cnt += 1;
}
///////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////// Dropdown ////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
if (gui_args[key].type=='dropdown'){ // PLUGIN_ARG_MENU
const component_name = "plugin_arg_drop_" + dropdown_cnt;
push_data_to_gradio_component({
visible: true,
choices: gui_args[key].options,
label: gui_args[key].title + "(" + gui_args[key].description + ")",
// label: gui_args[key].title,
placeholder: gui_args[key].description,
__type__: 'update'
}, component_name, "obj");
push_data_to_gradio_component(gui_args[key].default_value, component_name, "obj");
document.getElementById(component_name).parentNode.style.display = '';
dropdown_cnt += 1;
}
}
}
}
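// The schema generate_menu consumes is doubly encoded: each field is an inner
// JSON string, and the outer object is JSON-serialized then base64-encoded.
// A hand-built example payload (field values are illustrative):
// const schema = {
//     advanced_arg: JSON.stringify({
//         type: "string", title: "Advanced arg",
//         description: "extra parameters", default_value: ""
//     })
// };
// generate_menu(btoa(JSON.stringify(schema)), "SomePluginButtonName");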
async function execute_current_pop_up_plugin(){
let guiBase64String = await get_data_from_gradio_component('invisible_current_pop_up_plugin_arg');
const stringData = atob(guiBase64String);
let guiJsonData = JSON.parse(stringData);
gui_args = {};
for (const key in guiJsonData) {
if (guiJsonData.hasOwnProperty(key)) {
const innerJSONString = guiJsonData[key];
const decodedObject = JSON.parse(innerJSONString);
gui_args[key] = decodedObject;
}
}
// read user confirmed value
let text_cnt = 0;
for (const key in gui_args) {
if (gui_args.hasOwnProperty(key)) {
if (gui_args[key].type=='string'){ // PLUGIN_ARG_MENU
corrisponding_elem_id = "plugin_arg_txt_"+text_cnt
gui_args[key].user_confirmed_value = await get_data_from_gradio_component(corrisponding_elem_id);
text_cnt += 1;
}
}
}
let dropdown_cnt = 0;
for (const key in gui_args) {
if (gui_args.hasOwnProperty(key)) {
if (gui_args[key].type=='dropdown'){ // PLUGIN_ARG_MENU
corrisponding_elem_id = "plugin_arg_drop_"+dropdown_cnt
gui_args[key].user_confirmed_value = await get_data_from_gradio_component(corrisponding_elem_id);
dropdown_cnt += 1;
}
}
}
// close menu
push_data_to_gradio_component({
visible: false,
__type__: 'update'
}, "plugin_arg_menu", "obj");
hide_all_elem();
// execute the plugin
push_data_to_gradio_component(JSON.stringify(gui_args), "invisible_current_pop_up_plugin_arg_final", "string");
document.getElementById("invisible_callback_btn_for_plugin_exe").click();
}
function hide_all_elem(){
// PLUGIN_ARG_MENU
for (text_cnt = 0; text_cnt < 8; text_cnt++){
push_data_to_gradio_component({
visible: false,
label: "",
__type__: 'update'
}, "plugin_arg_txt_"+text_cnt, "obj");
document.getElementById("plugin_arg_txt_"+text_cnt).parentNode.parentNode.style.display = 'none';
}
for (dropdown_cnt = 0; dropdown_cnt < 8; dropdown_cnt++){
push_data_to_gradio_component({
visible: false,
choices: [],
label: "",
__type__: 'update'
}, "plugin_arg_drop_"+dropdown_cnt, "obj");
document.getElementById("plugin_arg_drop_"+dropdown_cnt).parentNode.style.display = 'none';
}
}
function close_current_pop_up_plugin(){
// PLUGIN_ARG_MENU
push_data_to_gradio_component({
visible: false,
__type__: 'update'
}, "plugin_arg_menu", "obj");
hide_all_elem();
}
// Build the selection menu for advanced plugins
var plugin_init_info_lib = {};
function register_plugin_init(key, base64String) {
    const stringData = atob(base64String);
    let guiJsonData = JSON.parse(stringData);
    if (!(key in plugin_init_info_lib)) {
        plugin_init_info_lib[key] = {};
    }
    plugin_init_info_lib[key].info = guiJsonData.Info;
    plugin_init_info_lib[key].color = guiJsonData.Color;
    plugin_init_info_lib[key].elem_id = guiJsonData.ButtonElemId;
    plugin_init_info_lib[key].label = guiJsonData.Label;
    plugin_init_info_lib[key].enable_advanced_arg = guiJsonData.AdvancedArgs;
    plugin_init_info_lib[key].arg_reminder = guiJsonData.ArgsReminder;
}
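// Illustrative registration call: the base64 payload carries the keys read
// above (Info, Color, ButtonElemId, Label, AdvancedArgs, ArgsReminder);
// the concrete values here are made up:
// register_plugin_init("SomePlugin", btoa(JSON.stringify({
//     Info: "what this plugin does",
//     Color: "secondary",
//     ButtonElemId: "elem_btn_some_plugin",
//     Label: "advanced args",
//     AdvancedArgs: true,
//     ArgsReminder: "e.g. target language"
// })));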
function register_advanced_plugin_init_code(key, code) {
    if (!(key in plugin_init_info_lib)) {
        plugin_init_info_lib[key] = {};
    }
    plugin_init_info_lib[key].secondary_menu_code = code;
}
function run_advanced_plugin_launch_code(key){
// open the secondary menu using the stored (base64-encoded) argument schema
generate_menu(plugin_init_info_lib[key].secondary_menu_code, key);
}
function on_flex_button_click(key){
if (plugin_init_info_lib.hasOwnProperty(key) && plugin_init_info_lib[key].hasOwnProperty('secondary_menu_code')){
run_advanced_plugin_launch_code(key);
}else{
document.getElementById("old_callback_btn_for_plugin_exe").click();
}
}
async function run_dropdown_shift(dropdown){
let key = dropdown;
push_data_to_gradio_component({
value: key,
variant: plugin_init_info_lib[key].color,
info_str: plugin_init_info_lib[key].info,
__type__: 'update'
}, "elem_switchy_bt", "obj");
    push_data_to_gradio_component({
        visible: plugin_init_info_lib[key].enable_advanced_arg,
        label: plugin_init_info_lib[key].label,
        __type__: 'update'
    }, "advance_arg_input_legacy", "obj");
}
async function duplicate_in_new_window() {
// get the current page URL
var url = window.location.href;
// open the URL in a new tab
window.open(url, '_blank');
}
async function run_classic_plugin_via_id(plugin_elem_id){
for (const key in plugin_init_info_lib) {
    if (plugin_init_info_lib[key].elem_id == plugin_elem_id) {
        // get the button's display name
        let current_btn_name = await get_data_from_gradio_component(plugin_elem_id);
        // execute the plugin
        call_plugin_via_name(current_btn_name);
        return;
    }
}
}
async function call_plugin_via_name(current_btn_name) {
gui_args = {};
// close the menu (if it is open)
push_data_to_gradio_component({
visible: false,
__type__: 'update'
}, "plugin_arg_menu", "obj");
hide_all_elem();
// for compatibility with older plugins, auto-load the legacy advanced-arg input value
let advance_arg_input_legacy = await get_data_from_gradio_component('advance_arg_input_legacy');
if (advance_arg_input_legacy.length != 0){
gui_args["advanced_arg"] = {};
gui_args["advanced_arg"].user_confirmed_value = advance_arg_input_legacy;
}
// execute the plugin
push_data_to_gradio_component(JSON.stringify(gui_args), "invisible_current_pop_up_plugin_arg_final", "string");
push_data_to_gradio_component(current_btn_name, "invisible_callback_btn_for_plugin_exe", "string");
document.getElementById("invisible_callback_btn_for_plugin_exe").click();
}
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Multiplexed submit button
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
async function click_real_submit_btn() {
document.getElementById("elem_submit").click();
}
async function multiplex_function_begin(multiplex_sel) {
if (multiplex_sel === "常规对话") {
click_real_submit_btn();
return;
}
if (multiplex_sel === "多模型对话") {
let _align_name_in_crazy_function_py = "询问多个GPT模型";
call_plugin_via_name(_align_name_in_crazy_function_py);
return;
}
}
async function run_multiplex_shift(multiplex_sel){
let key = multiplex_sel;
if (multiplex_sel === "常规对话") {
key = "提交";
} else {
key = "提交 (" + multiplex_sel + ")";
}
push_data_to_gradio_component({
value: key,
__type__: 'update'
}, "elem_submit_visible", "obj");
}
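// Illustrative dispatch of the multiplexed submit button: "常规对话" clicks
// the real submit button, while "多模型对话" routes through the plugin
// registered as "询问多个GPT模型":
// multiplex_function_begin("常规对话");
// multiplex_function_begin("多模型对话");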

Some files were not shown because too many files have changed in this diff.