镜像自地址
https://github.com/binary-husky/gpt_academic.git
已同步 2025-12-10 16:36:48 +00:00
比较提交
47 次代码提交
version3.7
...
huggingfac
| 作者 | SHA1 | 提交日期 | |
|---|---|---|---|
|
|
68ff3660ae | ||
|
|
d0703ef32d | ||
|
|
47289f863d | ||
|
|
eaf27df32a | ||
|
|
d245958dfa | ||
|
|
8dd4d48474 | ||
|
|
15f14f51ff | ||
|
|
1de63835fc | ||
|
|
17d0a32f36 | ||
|
|
971ac206f3 | ||
|
|
c89c62b914 | ||
|
|
f8946c13f2 | ||
|
|
098d8654b3 | ||
|
|
ac8830e30e | ||
|
|
5c0a0882c8 | ||
|
|
f5357f67ca | ||
|
|
a1fe67d7f2 | ||
|
|
cbee909bc8 | ||
|
|
8a5e8bc5c1 | ||
|
|
96c1852abc | ||
|
|
cd145c0794 | ||
|
|
7a4d4ad956 | ||
|
|
9f9848c6e9 | ||
|
|
94425c49fd | ||
|
|
e874a16050 | ||
|
|
c28388c5fe | ||
|
|
b4a56d391b | ||
|
|
7075092f86 | ||
|
|
1086ff8092 | ||
|
|
3a22446b47 | ||
|
|
7842cf03cc | ||
|
|
54f55c32f2 | ||
|
|
94318ff0a2 | ||
|
|
5be6b83762 | ||
|
|
6f18d1716e | ||
|
|
90944bd744 | ||
|
|
752937cb70 | ||
|
|
c584cbac5b | ||
|
|
309d12b404 | ||
|
|
52ea0acd61 | ||
|
|
9f5e3e0fd5 | ||
|
|
315e78e5d9 | ||
|
|
b6b4ba684a | ||
|
|
2281a5ca7f | ||
|
|
49558686f2 | ||
|
|
b050ccedb5 | ||
|
|
ae56cab6f4 |
8
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
8
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
@@ -11,8 +11,6 @@ body:
|
|||||||
- Please choose | 请选择
|
- Please choose | 请选择
|
||||||
- Pip Install (I ignored requirements.txt)
|
- Pip Install (I ignored requirements.txt)
|
||||||
- Pip Install (I used latest requirements.txt)
|
- Pip Install (I used latest requirements.txt)
|
||||||
- OneKeyInstall (一键安装脚本-windows)
|
|
||||||
- OneKeyInstall (一键安装脚本-mac)
|
|
||||||
- Anaconda (I ignored requirements.txt)
|
- Anaconda (I ignored requirements.txt)
|
||||||
- Anaconda (I used latest requirements.txt)
|
- Anaconda (I used latest requirements.txt)
|
||||||
- Docker(Windows/Mac)
|
- Docker(Windows/Mac)
|
||||||
@@ -69,3 +67,9 @@ body:
|
|||||||
attributes:
|
attributes:
|
||||||
label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
|
label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
|
||||||
description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
|
description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
5
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
5
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
@@ -21,3 +21,8 @@ body:
|
|||||||
attributes:
|
attributes:
|
||||||
label: Feature Request | 功能请求
|
label: Feature Request | 功能请求
|
||||||
description: Feature Request | 功能请求
|
description: Feature Request | 功能请求
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,44 +0,0 @@
|
|||||||
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
|
||||||
name: build-with-all-capacity-beta
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
|
|
||||||
env:
|
|
||||||
REGISTRY: ghcr.io
|
|
||||||
IMAGE_NAME: ${{ github.repository }}_with_all_capacity_beta
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build-and-push-image:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
registry: ${{ env.REGISTRY }}
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Extract metadata (tags, labels) for Docker
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v4
|
|
||||||
with:
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
|
||||||
|
|
||||||
- name: Build and push Docker image
|
|
||||||
uses: docker/build-push-action@v4
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
push: true
|
|
||||||
file: docs/GithubAction+AllCapacityBeta
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
44
.github/workflows/build-with-all-capacity.yml
vendored
44
.github/workflows/build-with-all-capacity.yml
vendored
@@ -1,44 +0,0 @@
|
|||||||
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
|
||||||
name: build-with-all-capacity
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
|
|
||||||
env:
|
|
||||||
REGISTRY: ghcr.io
|
|
||||||
IMAGE_NAME: ${{ github.repository }}_with_all_capacity
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build-and-push-image:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
registry: ${{ env.REGISTRY }}
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Extract metadata (tags, labels) for Docker
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v4
|
|
||||||
with:
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
|
||||||
|
|
||||||
- name: Build and push Docker image
|
|
||||||
uses: docker/build-push-action@v4
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
push: true
|
|
||||||
file: docs/GithubAction+AllCapacity
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
44
.github/workflows/build-with-audio-assistant.yml
vendored
44
.github/workflows/build-with-audio-assistant.yml
vendored
@@ -1,44 +0,0 @@
|
|||||||
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
|
||||||
name: build-with-audio-assistant
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
|
|
||||||
env:
|
|
||||||
REGISTRY: ghcr.io
|
|
||||||
IMAGE_NAME: ${{ github.repository }}_audio_assistant
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build-and-push-image:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
registry: ${{ env.REGISTRY }}
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Extract metadata (tags, labels) for Docker
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v4
|
|
||||||
with:
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
|
||||||
|
|
||||||
- name: Build and push Docker image
|
|
||||||
uses: docker/build-push-action@v4
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
push: true
|
|
||||||
file: docs/GithubAction+NoLocal+AudioAssistant
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
2
.github/workflows/build-with-chatglm.yml
vendored
2
.github/workflows/build-with-chatglm.yml
vendored
@@ -1,5 +1,5 @@
|
|||||||
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
||||||
name: build-with-chatglm
|
name: Create and publish a Docker image for ChatGLM support
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
|
|||||||
2
.github/workflows/build-with-jittorllms.yml
vendored
2
.github/workflows/build-with-jittorllms.yml
vendored
@@ -1,5 +1,5 @@
|
|||||||
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
||||||
name: build-with-jittorllms
|
name: Create and publish a Docker image for ChatGLM support
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
|
|||||||
2
.github/workflows/build-with-latex.yml
vendored
2
.github/workflows/build-with-latex.yml
vendored
@@ -1,5 +1,5 @@
|
|||||||
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
||||||
name: build-with-latex
|
name: Create and publish a Docker image for Latex support
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
|
||||||
name: build-without-local-llms
|
name: Create and publish a Docker image
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
|
|||||||
25
.github/workflows/stale.yml
vendored
25
.github/workflows/stale.yml
vendored
@@ -1,25 +0,0 @@
|
|||||||
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
|
|
||||||
#
|
|
||||||
# You can adjust the behavior by modifying this file.
|
|
||||||
# For more information, see:
|
|
||||||
# https://github.com/actions/stale
|
|
||||||
|
|
||||||
name: 'Close stale issues and PRs'
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: '*/5 * * * *'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
stale:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
issues: write
|
|
||||||
pull-requests: read
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/stale@v8
|
|
||||||
with:
|
|
||||||
stale-issue-message: 'This issue is stale because it has been open 100 days with no activity. Remove stale label or comment or this will be closed in 1 days.'
|
|
||||||
days-before-stale: 100
|
|
||||||
days-before-close: 1
|
|
||||||
debug-only: true
|
|
||||||
8
.gitignore
vendored
8
.gitignore
vendored
@@ -146,11 +146,7 @@ debug*
|
|||||||
private*
|
private*
|
||||||
crazy_functions/test_project/pdf_and_word
|
crazy_functions/test_project/pdf_and_word
|
||||||
crazy_functions/test_samples
|
crazy_functions/test_samples
|
||||||
request_llms/jittorllms
|
request_llm/jittorllms
|
||||||
multi-language
|
multi-language
|
||||||
request_llms/moss
|
request_llm/moss
|
||||||
media
|
media
|
||||||
flagged
|
|
||||||
request_llms/ChatGLM-6b-onnx-u8s8
|
|
||||||
.pre-commit-config.yaml
|
|
||||||
themes/common.js.min.*.js
|
|
||||||
32
.pre-commit-config.yaml
普通文件
32
.pre-commit-config.yaml
普通文件
@@ -0,0 +1,32 @@
|
|||||||
|
default_language_version:
|
||||||
|
python: python3
|
||||||
|
exclude: 'dotnet'
|
||||||
|
ci:
|
||||||
|
autofix_prs: true
|
||||||
|
autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
|
||||||
|
autoupdate_schedule: 'quarterly'
|
||||||
|
|
||||||
|
repos:
|
||||||
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
|
rev: v4.4.0
|
||||||
|
hooks:
|
||||||
|
- id: check-ast
|
||||||
|
# - id: check-yaml
|
||||||
|
- id: check-toml
|
||||||
|
- id: check-json
|
||||||
|
- id: check-byte-order-marker
|
||||||
|
exclude: .gitignore
|
||||||
|
- id: check-merge-conflict
|
||||||
|
- id: detect-private-key
|
||||||
|
- id: trailing-whitespace
|
||||||
|
- id: end-of-file-fixer
|
||||||
|
- id: no-commit-to-branch
|
||||||
|
- repo: https://github.com/psf/black
|
||||||
|
rev: 23.3.0
|
||||||
|
hooks:
|
||||||
|
- id: black
|
||||||
|
# - repo: https://github.com/charliermarsh/ruff-pre-commit
|
||||||
|
# rev: v0.0.261
|
||||||
|
# hooks:
|
||||||
|
# - id: ruff
|
||||||
|
# args: ["--fix"]
|
||||||
29
README.md
29
README.md
@@ -1,8 +1,20 @@
|
|||||||
> [!IMPORTANT]
|
---
|
||||||
> 2024.5.1: 加入Doc2x翻译PDF论文的功能,[查看详情](https://github.com/binary-husky/gpt_academic/wiki/Doc2x)
|
title: GPT-Academic
|
||||||
> 2024.4.30: 3.75版本引入Edge-TTS和SoVits语音克隆模块,[查看详情](https://www.bilibili.com/video/BV1Rp421S7tF/)
|
emoji: 😻
|
||||||
> 2024.3.11: 恭迎Claude3和Moonshot,全力支持Qwen、GLM、DeepseekCoder等中文大语言模型!
|
colorFrom: blue
|
||||||
> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。
|
colorTo: blue
|
||||||
|
sdk: gradio
|
||||||
|
sdk_version: 3.32.0
|
||||||
|
app_file: app.py
|
||||||
|
pinned: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# ChatGPT 学术优化
|
||||||
|
> **Note**
|
||||||
|
>
|
||||||
|
> 2023.11.12: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
|
||||||
|
>
|
||||||
|
> 2023.12.26: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
@@ -87,10 +99,6 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
|
|||||||
<img src="https://user-images.githubusercontent.com/96192199/279702205-d81137c3-affd-4cd1-bb5e-b15610389762.gif" width="700" >
|
<img src="https://user-images.githubusercontent.com/96192199/279702205-d81137c3-affd-4cd1-bb5e-b15610389762.gif" width="700" >
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div align="center">
|
|
||||||
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/70ff1ec5-e589-4561-a29e-b831079b37fb.gif" width="700" >
|
|
||||||
</div>
|
|
||||||
|
|
||||||
|
|
||||||
- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放剪贴板
|
- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放剪贴板
|
||||||
<div align="center">
|
<div align="center">
|
||||||
@@ -257,7 +265,8 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以
|
|||||||
# Advanced Usage
|
# Advanced Usage
|
||||||
### I:自定义新的便捷按钮(学术快捷键)
|
### I:自定义新的便捷按钮(学术快捷键)
|
||||||
|
|
||||||
现在已可以通过UI中的`界面外观`菜单中的`自定义菜单`添加新的便捷按钮。如果需要在代码中定义,请使用任意文本编辑器打开`core_functional.py`,添加如下条目即可:
|
任意文本编辑器打开`core_functional.py`,添加如下条目,然后重启程序。(如果按钮已存在,那么可以直接修改(前缀、后缀都已支持热修改),无需重启程序即可生效。)
|
||||||
|
例如
|
||||||
|
|
||||||
```python
|
```python
|
||||||
"超级英译中": {
|
"超级英译中": {
|
||||||
|
|||||||
159
main.py → app.py
159
main.py → app.py
@@ -1,4 +1,4 @@
|
|||||||
import os, json; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
||||||
|
|
||||||
help_menu_description = \
|
help_menu_description = \
|
||||||
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
|
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
|
||||||
@@ -13,41 +13,37 @@ help_menu_description = \
|
|||||||
</br></br>如何语音对话: 请阅读Wiki
|
</br></br>如何语音对话: 请阅读Wiki
|
||||||
</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""
|
</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""
|
||||||
|
|
||||||
def enable_log(PATH_LOGGING):
|
|
||||||
import logging
|
|
||||||
admin_log_path = os.path.join(PATH_LOGGING, "admin")
|
|
||||||
os.makedirs(admin_log_path, exist_ok=True)
|
|
||||||
log_dir = os.path.join(admin_log_path, "chat_secrets.log")
|
|
||||||
try:logging.basicConfig(filename=log_dir, level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
|
||||||
except:logging.basicConfig(filename=log_dir, level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
|
||||||
# Disable logging output from the 'httpx' logger
|
|
||||||
logging.getLogger("httpx").setLevel(logging.WARNING)
|
|
||||||
print(f"所有对话记录将自动保存在本地目录{log_dir}, 请注意自我隐私保护哦!")
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
import subprocess, sys
|
||||||
|
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'https://public.agent-matrix.com/publish/gradio-3.32.8-py3-none-any.whl'])
|
||||||
import gradio as gr
|
import gradio as gr
|
||||||
if gr.__version__ not in ['3.32.9']:
|
if gr.__version__ not in ['3.32.8']:
|
||||||
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
|
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
|
||||||
from request_llms.bridge_all import predict
|
from request_llms.bridge_all import predict
|
||||||
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
|
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
|
||||||
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
|
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
|
||||||
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
|
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
|
||||||
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
|
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
|
||||||
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
|
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
|
||||||
NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
|
DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
|
||||||
DARK_MODE, INIT_SYS_PROMPT, ADD_WAIFU, TTS_TYPE = get_conf('DARK_MODE', 'INIT_SYS_PROMPT', 'ADD_WAIFU', 'TTS_TYPE')
|
INIT_SYS_PROMPT = get_conf('INIT_SYS_PROMPT')
|
||||||
if LLM_MODEL not in AVAIL_LLM_MODELS: AVAIL_LLM_MODELS += [LLM_MODEL]
|
|
||||||
|
|
||||||
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
||||||
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
||||||
from check_proxy import get_current_version
|
from check_proxy import get_current_version
|
||||||
from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2
|
from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2
|
||||||
from themes.theme import js_code_for_css_changing, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
|
from themes.theme import js_code_for_css_changing, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
|
||||||
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
|
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie
|
||||||
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
|
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
|
||||||
|
|
||||||
# 对话、日志记录
|
# 问询记录, python 版本建议3.9+(越新越好)
|
||||||
enable_log(PATH_LOGGING)
|
import logging, uuid
|
||||||
|
os.makedirs(PATH_LOGGING, exist_ok=True)
|
||||||
|
try:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
||||||
|
except:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
||||||
|
# Disable logging output from the 'httpx' logger
|
||||||
|
logging.getLogger("httpx").setLevel(logging.WARNING)
|
||||||
|
print(f"所有问询记录将自动保存在本地目录./{PATH_LOGGING}/chat_secrets.log, 请注意自我隐私保护哦!")
|
||||||
|
|
||||||
# 一些普通功能模块
|
# 一些普通功能模块
|
||||||
from core_functional import get_core_functions
|
from core_functional import get_core_functions
|
||||||
@@ -80,22 +76,20 @@ def main():
|
|||||||
cancel_handles = []
|
cancel_handles = []
|
||||||
customize_btns = {}
|
customize_btns = {}
|
||||||
predefined_btns = {}
|
predefined_btns = {}
|
||||||
from shared_utils.cookie_manager import make_cookie_cache, make_history_cache
|
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
|
||||||
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as app_block:
|
|
||||||
gr.HTML(title_html)
|
gr.HTML(title_html)
|
||||||
secret_css = gr.Textbox(visible=False, elem_id="secret_css")
|
gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!<font color="#FF00FF">使用时,先在输入框填入API-KEY然后回车。</font><br/>切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!<br/>支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。</center>''')
|
||||||
|
secret_css, dark_mode, py_pickle_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False)
|
||||||
|
cookies = gr.State(load_chat_cookies())
|
||||||
cookies, web_cookie_cache = make_cookie_cache() # 定义 后端state(cookies)、前端(web_cookie_cache)两兄弟
|
|
||||||
with gr_L1():
|
with gr_L1():
|
||||||
with gr_L2(scale=2, elem_id="gpt-chat"):
|
with gr_L2(scale=2, elem_id="gpt-chat"):
|
||||||
chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot")
|
chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot")
|
||||||
if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT)
|
if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT)
|
||||||
history, history_cache, history_cache_update = make_history_cache() # 定义 后端state(history)、前端(history_cache)、后端setter(history_cache_update)三兄弟
|
history = gr.State([])
|
||||||
with gr_L2(scale=1, elem_id="gpt-panel"):
|
with gr_L2(scale=1, elem_id="gpt-panel"):
|
||||||
with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
|
with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
|
||||||
with gr.Row():
|
with gr.Row():
|
||||||
txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
|
txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥,输入多个密钥时,用英文逗号间隔。支持多个OpenAI密钥共存。").style(container=False)
|
||||||
with gr.Row():
|
with gr.Row():
|
||||||
submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
|
submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
|
||||||
with gr.Row():
|
with gr.Row():
|
||||||
@@ -159,17 +153,11 @@ def main():
|
|||||||
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")
|
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")
|
||||||
|
|
||||||
with gr.Tab("更换模型", elem_id="interact-panel"):
|
with gr.Tab("更换模型", elem_id="interact-panel"):
|
||||||
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, elem_id="elem_model_sel", label="更换LLM模型/请求源").style(container=False)
|
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
|
||||||
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
||||||
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature", elem_id="elem_temperature")
|
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
|
||||||
max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",)
|
max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",)
|
||||||
system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=INIT_SYS_PROMPT, elem_id="elem_prompt")
|
system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=INIT_SYS_PROMPT)
|
||||||
temperature.change(None, inputs=[temperature], outputs=None,
|
|
||||||
_js="""(temperature)=>gpt_academic_gradio_saveload("save", "elem_prompt", "js_temperature_cookie", temperature)""")
|
|
||||||
system_prompt.change(None, inputs=[system_prompt], outputs=None,
|
|
||||||
_js="""(system_prompt)=>gpt_academic_gradio_saveload("save", "elem_prompt", "js_system_prompt_cookie", system_prompt)""")
|
|
||||||
md_dropdown.change(None, inputs=[md_dropdown], outputs=None,
|
|
||||||
_js="""(md_dropdown)=>gpt_academic_gradio_saveload("save", "elem_model_sel", "js_md_dropdown_cookie", md_dropdown)""")
|
|
||||||
|
|
||||||
with gr.Tab("界面外观", elem_id="interact-panel"):
|
with gr.Tab("界面外观", elem_id="interact-panel"):
|
||||||
theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
|
theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
|
||||||
@@ -209,19 +197,64 @@ def main():
|
|||||||
with gr.Column(scale=1, min_width=70):
|
with gr.Column(scale=1, min_width=70):
|
||||||
basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
|
basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
|
||||||
basic_fn_clean = gr.Button("恢复默认", variant="primary"); basic_fn_clean.style(size="sm")
|
basic_fn_clean = gr.Button("恢复默认", variant="primary"); basic_fn_clean.style(size="sm")
|
||||||
|
def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix, clean_up=False):
|
||||||
|
ret = {}
|
||||||
|
# 读取之前的自定义按钮
|
||||||
|
customize_fn_overwrite_ = cookies_['customize_fn_overwrite']
|
||||||
|
# 更新新的自定义按钮
|
||||||
|
customize_fn_overwrite_.update({
|
||||||
|
basic_btn_dropdown_:
|
||||||
|
{
|
||||||
|
"Title":basic_fn_title,
|
||||||
|
"Prefix":basic_fn_prefix,
|
||||||
|
"Suffix":basic_fn_suffix,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
if clean_up:
|
||||||
|
customize_fn_overwrite_ = {}
|
||||||
|
cookies_.update(customize_fn_overwrite_) # 更新cookie
|
||||||
|
visible = (not clean_up) and (basic_fn_title != "")
|
||||||
|
if basic_btn_dropdown_ in customize_btns:
|
||||||
|
# 是自定义按钮,不是预定义按钮
|
||||||
|
ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)})
|
||||||
|
else:
|
||||||
|
# 是预定义按钮
|
||||||
|
ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)})
|
||||||
|
ret.update({cookies: cookies_})
|
||||||
|
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||||
|
except: persistent_cookie_ = {}
|
||||||
|
persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value
|
||||||
|
persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||||
|
ret.update({py_pickle_cookie: persistent_cookie_}) # write persistent cookie
|
||||||
|
return ret
|
||||||
|
|
||||||
from shared_utils.cookie_manager import assign_btn__fn_builder
|
|
||||||
assign_btn = assign_btn__fn_builder(customize_btns, predefined_btns, cookies, web_cookie_cache)
|
|
||||||
# update btn
|
# update btn
|
||||||
h = basic_fn_confirm.click(assign_btn, [web_cookie_cache, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
|
h = basic_fn_confirm.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
|
||||||
[web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()])
|
[py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
|
||||||
h.then(None, [web_cookie_cache], None, _js="""(web_cookie_cache)=>{setCookie("web_cookie_cache", web_cookie_cache, 365);}""")
|
h.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""")
|
||||||
# clean up btn
|
# clean up btn
|
||||||
h2 = basic_fn_clean.click(assign_btn, [web_cookie_cache, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix, gr.State(True)],
|
h2 = basic_fn_clean.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix, gr.State(True)],
|
||||||
[web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()])
|
[py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
|
||||||
h2.then(None, [web_cookie_cache], None, _js="""(web_cookie_cache)=>{setCookie("web_cookie_cache", web_cookie_cache, 365);}""")
|
h2.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""")
|
||||||
|
|
||||||
|
def persistent_cookie_reload(persistent_cookie_, cookies_):
|
||||||
|
ret = {}
|
||||||
|
for k in customize_btns:
|
||||||
|
ret.update({customize_btns[k]: gr.update(visible=False, value="")})
|
||||||
|
|
||||||
|
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||||
|
except: return ret
|
||||||
|
|
||||||
|
customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {})
|
||||||
|
cookies_['customize_fn_overwrite'] = customize_fn_overwrite_
|
||||||
|
ret.update({cookies: cookies_})
|
||||||
|
|
||||||
|
for k,v in persistent_cookie_["custom_bnt"].items():
|
||||||
|
if v['Title'] == "": continue
|
||||||
|
if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])})
|
||||||
|
else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
|
||||||
|
return ret
|
||||||
|
|
||||||
# 功能区显示开关与功能区的互动
|
# 功能区显示开关与功能区的互动
|
||||||
def fn_area_visibility(a):
|
def fn_area_visibility(a):
|
||||||
@@ -253,10 +286,8 @@ def main():
|
|||||||
cancel_handles.append(submitBtn2.click(**predict_args))
|
cancel_handles.append(submitBtn2.click(**predict_args))
|
||||||
resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
|
resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
|
||||||
resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
|
resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
|
||||||
reset_server_side_args = (lambda history: ([], [], "已重置", json.dumps(history)),
|
resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history
|
||||||
[history], [chatbot, history, status, history_cache])
|
resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history
|
||||||
resetBtn.click(*reset_server_side_args) # 再在后端清除history,把history转存history_cache备用
|
|
||||||
resetBtn2.click(*reset_server_side_args) # 再在后端清除history,把history转存history_cache备用
|
|
||||||
clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
|
clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
|
||||||
clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
|
clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
|
||||||
if AUTO_CLEAR_TXT:
|
if AUTO_CLEAR_TXT:
|
||||||
@@ -279,7 +310,7 @@ def main():
|
|||||||
for k in plugins:
|
for k in plugins:
|
||||||
if not plugins[k].get("AsButton", True): continue
|
if not plugins[k].get("AsButton", True): continue
|
||||||
click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo], output_combo)
|
click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo], output_combo)
|
||||||
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [plugins[k]["Button"]], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
|
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
|
||||||
cancel_handles.append(click_handle)
|
cancel_handles.append(click_handle)
|
||||||
# 函数插件-下拉菜单与随变按钮的互动
|
# 函数插件-下拉菜单与随变按钮的互动
|
||||||
def on_dropdown_changed(k):
|
def on_dropdown_changed(k):
|
||||||
@@ -317,7 +348,7 @@ def main():
|
|||||||
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
|
if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
|
||||||
yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs)
|
yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs)
|
||||||
click_handle = switchy_bt.click(route,[switchy_bt, *input_combo], output_combo)
|
click_handle = switchy_bt.click(route,[switchy_bt, *input_combo], output_combo)
|
||||||
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [switchy_bt], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
|
click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
|
||||||
cancel_handles.append(click_handle)
|
cancel_handles.append(click_handle)
|
||||||
# 终止按钮的回调函数注册
|
# 终止按钮的回调函数注册
|
||||||
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
|
stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
|
||||||
@@ -343,14 +374,11 @@ def main():
|
|||||||
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
|
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
|
||||||
|
|
||||||
|
|
||||||
app_block.load(assign_user_uuid, inputs=[cookies], outputs=[cookies])
|
demo.load(init_cookie, inputs=[cookies], outputs=[cookies])
|
||||||
|
demo.load(persistent_cookie_reload, inputs = [py_pickle_cookie, cookies],
|
||||||
from shared_utils.cookie_manager import load_web_cookie_cache__fn_builder
|
outputs = [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)
|
||||||
load_web_cookie_cache = load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)
|
demo.load(None, inputs=[dark_mode], outputs=None, _js="""(dark_mode)=>{apply_cookie_for_checkbox(dark_mode);}""") # 配置暗色主题或亮色主题
|
||||||
app_block.load(load_web_cookie_cache, inputs = [web_cookie_cache, cookies],
|
demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
|
||||||
outputs = [web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)
|
|
||||||
|
|
||||||
app_block.load(None, inputs=[], outputs=None, _js=f"""()=>GptAcademicJavaScriptInit("{DARK_MODE}","{INIT_SYS_PROMPT}","{ADD_WAIFU}","{LAYOUT}","{TTS_TYPE}")""") # 配置暗色主题或亮色主题
|
|
||||||
|
|
||||||
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
||||||
def run_delayed_tasks():
|
def run_delayed_tasks():
|
||||||
@@ -367,13 +395,18 @@ def main():
|
|||||||
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
|
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
|
||||||
threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
|
threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
|
||||||
|
|
||||||
# 运行一些异步任务:自动更新、打开浏览器页面、预热tiktoken模块
|
|
||||||
run_delayed_tasks()
|
run_delayed_tasks()
|
||||||
|
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png", blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
|
||||||
|
|
||||||
# 最后,正式开始服务
|
|
||||||
from shared_utils.fastapi_server import start_app
|
|
||||||
start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SSL_CERTFILE)
|
|
||||||
|
|
||||||
|
# 如果需要在二级路径下运行
|
||||||
|
# CUSTOM_PATH = get_conf('CUSTOM_PATH')
|
||||||
|
# if CUSTOM_PATH != "/":
|
||||||
|
# from toolbox import run_gradio_in_subpath
|
||||||
|
# run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
|
||||||
|
# else:
|
||||||
|
# demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
|
||||||
|
# blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
@@ -47,7 +47,7 @@ def backup_and_download(current_version, remote_version):
|
|||||||
shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
|
shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
|
||||||
proxies = get_conf('proxies')
|
proxies = get_conf('proxies')
|
||||||
try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
|
try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
|
||||||
except: r = requests.get('https://public.agent-matrix.com/publish/master.zip', proxies=proxies, stream=True)
|
except: r = requests.get('https://public.gpt-academic.top/publish/master.zip', proxies=proxies, stream=True)
|
||||||
zip_file_path = backup_dir+'/master.zip'
|
zip_file_path = backup_dir+'/master.zip'
|
||||||
with open(zip_file_path, 'wb+') as f:
|
with open(zip_file_path, 'wb+') as f:
|
||||||
f.write(r.content)
|
f.write(r.content)
|
||||||
@@ -113,7 +113,7 @@ def auto_update(raise_error=False):
|
|||||||
import json
|
import json
|
||||||
proxies = get_conf('proxies')
|
proxies = get_conf('proxies')
|
||||||
try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
|
try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
|
||||||
except: response = requests.get("https://public.agent-matrix.com/publish/version", proxies=proxies, timeout=5)
|
except: response = requests.get("https://public.gpt-academic.top/publish/version", proxies=proxies, timeout=5)
|
||||||
remote_json_data = json.loads(response.text)
|
remote_json_data = json.loads(response.text)
|
||||||
remote_version = remote_json_data['version']
|
remote_version = remote_json_data['version']
|
||||||
if remote_json_data["show_feature"]:
|
if remote_json_data["show_feature"]:
|
||||||
|
|||||||
113
config.py
113
config.py
@@ -11,6 +11,10 @@
|
|||||||
API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
|
API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
|
||||||
|
|
||||||
|
|
||||||
|
# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项
|
||||||
|
API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
|
||||||
|
|
||||||
|
|
||||||
# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改
|
# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改
|
||||||
USE_PROXY = False
|
USE_PROXY = False
|
||||||
if USE_PROXY:
|
if USE_PROXY:
|
||||||
@@ -30,37 +34,11 @@ if USE_PROXY:
|
|||||||
else:
|
else:
|
||||||
proxies = None
|
proxies = None
|
||||||
|
|
||||||
# [step 3]>> 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
|
# ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------
|
||||||
LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
|
|
||||||
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
|
|
||||||
"gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
|
|
||||||
"gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
|
|
||||||
"gemini-pro", "chatglm3"
|
|
||||||
]
|
|
||||||
# --- --- --- ---
|
|
||||||
# P.S. 其他可用的模型还包括
|
|
||||||
# AVAIL_LLM_MODELS = [
|
|
||||||
# "qianfan", "deepseekcoder",
|
|
||||||
# "spark", "sparkv2", "sparkv3", "sparkv3.5",
|
|
||||||
# "qwen-turbo", "qwen-plus", "qwen-max", "qwen-local",
|
|
||||||
# "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k",
|
|
||||||
# "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125"
|
|
||||||
# "claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229", "claude-2.1", "claude-instant-1.2",
|
|
||||||
# "moss", "llama2", "chatglm_onnx", "internlm", "jittorllms_pangualpha", "jittorllms_llama",
|
|
||||||
# "yi-34b-chat-0205", "yi-34b-chat-200k"
|
|
||||||
# ]
|
|
||||||
# --- --- --- ---
|
|
||||||
# 此外,您还可以在接入one-api/vllm/ollama时,
|
|
||||||
# 使用"one-api-*","vllm-*","ollama-*"前缀直接使用非标准方式接入的模型,例如
|
|
||||||
# AVAIL_LLM_MODELS = ["one-api-claude-3-sonnet-20240229(max_token=100000)", "ollama-phi3(max_token=4096)"]
|
|
||||||
# --- --- --- ---
|
|
||||||
|
|
||||||
|
|
||||||
# --------------- 以下配置可以优化体验 ---------------
|
|
||||||
|
|
||||||
# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
|
# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
|
||||||
# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
|
# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
|
||||||
# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions", "http://localhost:11434/api/chat": "在这里填写您ollama的URL"}
|
# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"}
|
||||||
API_URL_REDIRECT = {}
|
API_URL_REDIRECT = {}
|
||||||
|
|
||||||
|
|
||||||
@@ -71,7 +49,7 @@ DEFAULT_WORKER_NUM = 3
|
|||||||
|
|
||||||
# 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
|
# 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
|
||||||
# 更多主题, 请查阅Gradio主题商店: https://huggingface.co/spaces/gradio/theme-gallery 可选 ["Gstaff/Xkcd", "NoCrypt/Miku", ...]
|
# 更多主题, 请查阅Gradio主题商店: https://huggingface.co/spaces/gradio/theme-gallery 可选 ["Gstaff/Xkcd", "NoCrypt/Miku", ...]
|
||||||
THEME = "Default"
|
THEME = "Chuanhu-Small-and-Beautiful"
|
||||||
AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"]
|
AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"]
|
||||||
|
|
||||||
|
|
||||||
@@ -92,7 +70,7 @@ LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下
|
|||||||
|
|
||||||
|
|
||||||
# 暗色模式 / 亮色模式
|
# 暗色模式 / 亮色模式
|
||||||
DARK_MODE = True
|
DARK_MODE = False
|
||||||
|
|
||||||
|
|
||||||
# 发送请求到OpenAI后,等待多久判定为超时
|
# 发送请求到OpenAI后,等待多久判定为超时
|
||||||
@@ -106,11 +84,28 @@ WEB_PORT = -1
|
|||||||
# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
|
# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
|
||||||
MAX_RETRY = 2
|
MAX_RETRY = 2
|
||||||
|
|
||||||
|
# OpenAI模型选择是(gpt4现在只对申请成功的人开放)
|
||||||
|
LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm"
|
||||||
|
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo", "spark", "azure-gpt-3.5"]
|
||||||
|
|
||||||
# 插件分类默认选项
|
# 插件分类默认选项
|
||||||
DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
|
DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
|
||||||
|
|
||||||
|
|
||||||
|
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
|
||||||
|
LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
|
||||||
|
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
|
||||||
|
"gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
|
||||||
|
"gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo",
|
||||||
|
"gemini-pro", "chatglm3", "claude-2"]
|
||||||
|
# P.S. 其他可用的模型还包括 [
|
||||||
|
# "moss", "qwen-turbo", "qwen-plus", "qwen-max"
|
||||||
|
# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
|
||||||
|
# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
|
||||||
|
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
|
||||||
|
# ]
|
||||||
|
|
||||||
|
|
||||||
# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
|
# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
|
||||||
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
|
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
|
||||||
|
|
||||||
@@ -139,7 +134,6 @@ CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b
|
|||||||
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
|
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
|
||||||
LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本
|
LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本
|
||||||
|
|
||||||
|
|
||||||
# 设置gradio的并行线程数(不需要修改)
|
# 设置gradio的并行线程数(不需要修改)
|
||||||
CONCURRENT_COUNT = 100
|
CONCURRENT_COUNT = 100
|
||||||
|
|
||||||
@@ -149,7 +143,7 @@ AUTO_CLEAR_TXT = False
|
|||||||
|
|
||||||
|
|
||||||
# 加一个live2d装饰
|
# 加一个live2d装饰
|
||||||
ADD_WAIFU = False
|
ADD_WAIFU = True
|
||||||
|
|
||||||
|
|
||||||
# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
|
# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
|
||||||
@@ -157,8 +151,7 @@ ADD_WAIFU = False
|
|||||||
AUTHENTICATION = []
|
AUTHENTICATION = []
|
||||||
|
|
||||||
|
|
||||||
# 如果需要在二级路径下运行(常规情况下,不要修改!!)
|
# 如果需要在二级路径下运行(常规情况下,不要修改!!)(需要配合修改main.py才能生效!)
|
||||||
# (举例 CUSTOM_PATH = "/gpt_academic",可以让软件运行在 http://ip:port/gpt_academic/ 下。)
|
|
||||||
CUSTOM_PATH = "/"
|
CUSTOM_PATH = "/"
|
||||||
|
|
||||||
|
|
||||||
@@ -186,8 +179,14 @@ AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.
|
|||||||
AZURE_CFG_ARRAY = {}
|
AZURE_CFG_ARRAY = {}
|
||||||
|
|
||||||
|
|
||||||
# 阿里云实时语音识别 配置难度较高
|
# 使用Newbing (不推荐使用,未来将删除)
|
||||||
# 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md
|
NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
|
||||||
|
NEWBING_COOKIES = """
|
||||||
|
put your new bing cookies here
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
# 阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md
|
||||||
ENABLE_AUDIO = False
|
ENABLE_AUDIO = False
|
||||||
ALIYUN_TOKEN="" # 例如 f37f30e0f9934c34a992f6f64f7eba4f
|
ALIYUN_TOKEN="" # 例如 f37f30e0f9934c34a992f6f64f7eba4f
|
||||||
ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK
|
ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK
|
||||||
@@ -195,12 +194,6 @@ ALIYUN_ACCESSKEY="" # (无需填写)
|
|||||||
ALIYUN_SECRET="" # (无需填写)
|
ALIYUN_SECRET="" # (无需填写)
|
||||||
|
|
||||||
|
|
||||||
# GPT-SOVITS 文本转语音服务的运行地址(将语言模型的生成文本朗读出来)
|
|
||||||
TTS_TYPE = "DISABLE" # LOCAL / LOCAL_SOVITS_API / DISABLE
|
|
||||||
GPT_SOVITS_URL = ""
|
|
||||||
EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"
|
|
||||||
|
|
||||||
|
|
||||||
# 接入讯飞星火大模型 https://console.xfyun.cn/services/iat
|
# 接入讯飞星火大模型 https://console.xfyun.cn/services/iat
|
||||||
XFYUN_APPID = "00000000"
|
XFYUN_APPID = "00000000"
|
||||||
XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
|
XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
|
||||||
@@ -212,27 +205,21 @@ ZHIPUAI_API_KEY = ""
|
|||||||
ZHIPUAI_MODEL = "" # 此选项已废弃,不再需要填写
|
ZHIPUAI_MODEL = "" # 此选项已废弃,不再需要填写
|
||||||
|
|
||||||
|
|
||||||
|
# # 火山引擎YUNQUE大模型
|
||||||
|
# YUNQUE_SECRET_KEY = ""
|
||||||
|
# YUNQUE_ACCESS_KEY = ""
|
||||||
|
# YUNQUE_MODEL = ""
|
||||||
|
|
||||||
|
|
||||||
# Claude API KEY
|
# Claude API KEY
|
||||||
ANTHROPIC_API_KEY = ""
|
ANTHROPIC_API_KEY = ""
|
||||||
|
|
||||||
|
|
||||||
# 月之暗面 API KEY
|
|
||||||
MOONSHOT_API_KEY = ""
|
|
||||||
|
|
||||||
|
|
||||||
# 零一万物(Yi Model) API KEY
|
|
||||||
YIMODEL_API_KEY = ""
|
|
||||||
|
|
||||||
|
|
||||||
# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号
|
# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号
|
||||||
MATHPIX_APPID = ""
|
MATHPIX_APPID = ""
|
||||||
MATHPIX_APPKEY = ""
|
MATHPIX_APPKEY = ""
|
||||||
|
|
||||||
|
|
||||||
# DOC2X的PDF解析服务,注册账号并获取API KEY: https://doc2x.noedgeai.com/login
|
|
||||||
DOC2X_API_KEY = ""
|
|
||||||
|
|
||||||
|
|
||||||
# 自定义API KEY格式
|
# 自定义API KEY格式
|
||||||
CUSTOM_API_KEY_PATTERN = ""
|
CUSTOM_API_KEY_PATTERN = ""
|
||||||
|
|
||||||
@@ -242,7 +229,7 @@ GEMINI_API_KEY = ''
|
|||||||
|
|
||||||
|
|
||||||
# HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
|
# HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
|
||||||
HUGGINGFACE_ACCESS_TOKEN = "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"
|
HUGGINGFACE_ACCESS_TOKEN = ""
|
||||||
|
|
||||||
|
|
||||||
# GROBID服务器地址(填写多个可以均衡负载),用于高质量地读取PDF文档
|
# GROBID服务器地址(填写多个可以均衡负载),用于高质量地读取PDF文档
|
||||||
@@ -286,11 +273,7 @@ PLUGIN_HOT_RELOAD = False
|
|||||||
# 自定义按钮的最大数量限制
|
# 自定义按钮的最大数量限制
|
||||||
NUM_CUSTOM_BASIC_BTN = 4
|
NUM_CUSTOM_BASIC_BTN = 4
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
--------------- 配置关联关系说明 ---------------
|
|
||||||
|
|
||||||
在线大模型配置关联关系示意图
|
在线大模型配置关联关系示意图
|
||||||
│
|
│
|
||||||
├── "gpt-3.5-turbo" 等openai模型
|
├── "gpt-3.5-turbo" 等openai模型
|
||||||
@@ -314,7 +297,7 @@ NUM_CUSTOM_BASIC_BTN = 4
|
|||||||
│ ├── XFYUN_API_SECRET
|
│ ├── XFYUN_API_SECRET
|
||||||
│ └── XFYUN_API_KEY
|
│ └── XFYUN_API_KEY
|
||||||
│
|
│
|
||||||
├── "claude-3-opus-20240229" 等claude模型
|
├── "claude-1-100k" 等claude模型
|
||||||
│ └── ANTHROPIC_API_KEY
|
│ └── ANTHROPIC_API_KEY
|
||||||
│
|
│
|
||||||
├── "stack-claude"
|
├── "stack-claude"
|
||||||
@@ -329,19 +312,15 @@ NUM_CUSTOM_BASIC_BTN = 4
|
|||||||
├── "glm-4", "glm-3-turbo", "zhipuai" 智谱AI大模型
|
├── "glm-4", "glm-3-turbo", "zhipuai" 智谱AI大模型
|
||||||
│ └── ZHIPUAI_API_KEY
|
│ └── ZHIPUAI_API_KEY
|
||||||
│
|
│
|
||||||
├── "yi-34b-chat-0205", "yi-34b-chat-200k" 等零一万物(Yi Model)大模型
|
|
||||||
│ └── YIMODEL_API_KEY
|
|
||||||
│
|
|
||||||
├── "qwen-turbo" 等通义千问大模型
|
├── "qwen-turbo" 等通义千问大模型
|
||||||
│ └── DASHSCOPE_API_KEY
|
│ └── DASHSCOPE_API_KEY
|
||||||
│
|
│
|
||||||
├── "Gemini"
|
├── "Gemini"
|
||||||
│ └── GEMINI_API_KEY
|
│ └── GEMINI_API_KEY
|
||||||
│
|
│
|
||||||
└── "one-api-...(max_token=...)" 用一种更方便的方式接入one-api多模型管理界面
|
└── "newbing" Newbing接口不再稳定,不推荐使用
|
||||||
├── AVAIL_LLM_MODELS
|
├── NEWBING_STYLE
|
||||||
├── API_KEY
|
└── NEWBING_COOKIES
|
||||||
└── API_URL_REDIRECT
|
|
||||||
|
|
||||||
|
|
||||||
本地大模型示意图
|
本地大模型示意图
|
||||||
|
|||||||
@@ -38,12 +38,12 @@ def get_core_functions():
|
|||||||
|
|
||||||
"总结绘制脑图": {
|
"总结绘制脑图": {
|
||||||
# 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
|
# 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
|
||||||
"Prefix": '''"""\n\n''',
|
"Prefix": r"",
|
||||||
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
|
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
|
||||||
"Suffix":
|
"Suffix":
|
||||||
# dedent() 函数用于去除多行字符串的缩进
|
# dedent() 函数用于去除多行字符串的缩进
|
||||||
dedent("\n\n"+r'''
|
dedent("\n"+r'''
|
||||||
"""
|
==============================
|
||||||
|
|
||||||
使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:
|
使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:
|
||||||
|
|
||||||
@@ -57,7 +57,7 @@ def get_core_functions():
|
|||||||
C --> |"箭头名2"| F["节点名6"]
|
C --> |"箭头名2"| F["节点名6"]
|
||||||
```
|
```
|
||||||
|
|
||||||
注意:
|
警告:
|
||||||
(1)使用中文
|
(1)使用中文
|
||||||
(2)节点名字使用引号包裹,如["Laptop"]
|
(2)节点名字使用引号包裹,如["Laptop"]
|
||||||
(3)`|` 和 `"`之间不要存在空格
|
(3)`|` 和 `"`之间不要存在空格
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ def get_crazy_functions():
|
|||||||
from crazy_functions.辅助功能 import 清除缓存
|
from crazy_functions.辅助功能 import 清除缓存
|
||||||
from crazy_functions.批量Markdown翻译 import Markdown英译中
|
from crazy_functions.批量Markdown翻译 import Markdown英译中
|
||||||
from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
|
from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
|
||||||
from crazy_functions.PDF批量翻译 import 批量翻译PDF文档
|
from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
|
||||||
from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
|
from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
|
||||||
from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
|
from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
|
||||||
from crazy_functions.Latex全文润色 import Latex中文润色
|
from crazy_functions.Latex全文润色 import Latex中文润色
|
||||||
|
|||||||
crazy_functions/CodeInterpreter.py (232 行, 普通文件, 新增)
@@ -0,0 +1,232 @@

from collections.abc import Callable, Iterable, Mapping
from typing import Any
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc
from toolbox import promote_file_to_downloadzone, get_log_folder
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import input_clipping, try_install_deps
from multiprocessing import Process, Pipe
import os
import time

templete = """
```python
import ...  # Put dependencies here, e.g. import numpy as np

class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction`

    def run(self, path): # The name of the function must be `run`, it takes only a positional argument.
        # rewrite the function you have just written here
        ...
        return generated_file_path
```
"""

def inspect_dependency(chatbot, history):
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    return True

def get_code_block(reply):
    import re
    pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
    matches = re.findall(pattern, reply) # find all code blocks in text
    if len(matches) == 1:
        return matches[0].strip('python') # code block
    for match in matches:
        if 'class TerminalFunction' in match:
            return match.strip('python') # code block
    raise RuntimeError("GPT is not generating proper code.")

def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
    # 输入
    prompt_compose = [
        f'Your job:\n'
        f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n',
        f"2. You should write this function to perform following task: " + txt + "\n",
        f"3. Wrap the output python function with markdown codeblock."
    ]
    i_say = "".join(prompt_compose)
    demo = []

    # 第一步
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=i_say,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo,
        sys_prompt= r"You are a programmer."
    )
    history.extend([i_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新

    # 第二步
    prompt_compose = [
        "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n",
        templete
    ]
    i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. "
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=inputs_show_user,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
        sys_prompt= r"You are a programmer."
    )
    code_to_return = gpt_say
    history.extend([i_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新

    # # 第三步
    # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them."
    # i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`'
    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #     inputs=i_say, inputs_show_user=inputs_show_user,
    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
    #     sys_prompt= r"You are a programmer."
    # )
    # # # 第三步
    # i_say = "Show me how to use `pip` to install packages to run the code above. "
    # i_say += 'For instance. `pip install -r opencv-python scipy numpy`'
    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #     inputs=i_say, inputs_show_user=i_say,
    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
    #     sys_prompt= r"You are a programmer."
    # )
    installation_advance = ""

    return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history

def make_module(code):
    module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
    with open(f'{get_log_folder()}/{module_file}.py', 'w', encoding='utf8') as f:
        f.write(code)

    def get_class_name(class_string):
        import re
        # Use regex to extract the class name
        class_name = re.search(r'class (\w+)\(', class_string).group(1)
        return class_name

    class_name = get_class_name(code)
    return f"{get_log_folder().replace('/', '.')}.{module_file}->{class_name}"

def init_module_instance(module):
    import importlib
    module_, class_ = module.split('->')
    init_f = getattr(importlib.import_module(module_), class_)
    return init_f()

def for_immediate_show_off_when_possible(file_type, fp, chatbot):
    if file_type in ['png', 'jpg']:
        image_path = os.path.abspath(fp)
        chatbot.append(['这是一张图片, 展示如下:',
            f'本地文件地址: <br/>`{image_path}`<br/>'+
            f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
        ])
    return chatbot

def subprocess_worker(instance, file_path, return_dict):
    return_dict['result'] = instance.run(file_path)

def have_any_recent_upload_files(chatbot):
    _5min = 5 * 60
    if not chatbot: return False # chatbot is None
    most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
    if not most_recent_uploaded: return False # most_recent_uploaded is None
    if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new
    else: return False # most_recent_uploaded is too old

def get_recent_file_prompt_support(chatbot):
    most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
    path = most_recent_uploaded['path']
    return path

@CatchException
def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
    plugin_kwargs   插件模型的参数,暂时没有用武之地
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    web_port        当前软件运行的端口号
    """
    raise NotImplementedError

    # 清空历史,以免输入溢出
    history = []; clear_file_downloadzone(chatbot)

    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "CodeInterpreter开源版, 此插件处于开发阶段, 建议暂时不要使用, 插件初始化中 ..."
    ])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    if have_any_recent_upload_files(chatbot):
        file_path = get_recent_file_prompt_support(chatbot)
    else:
        chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 读取文件
    if ("recently_uploaded_files" in plugin_kwargs) and (plugin_kwargs["recently_uploaded_files"] == ""): plugin_kwargs.pop("recently_uploaded_files")
    recently_uploaded_files = plugin_kwargs.get("recently_uploaded_files", None)
    file_path = recently_uploaded_files[-1]
    file_type = file_path.split('.')[-1]

    # 粗心检查
    if is_the_upload_folder(txt):
        chatbot.append([
            "...",
            f"请在输入框内填写需求,然后再次点击该插件(文件路径 {file_path} 已经被记忆)"
        ])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 开始干正事
    for j in range(5):  # 最多重试5次
        try:
            code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \
                yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history)
            code = get_code_block(code)
            res = make_module(code)
            instance = init_module_instance(res)
            break
        except Exception as e:
            chatbot.append([f"第{j}次代码生成尝试,失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 代码生成结束, 开始执行
    try:
        import multiprocessing
        manager = multiprocessing.Manager()
        return_dict = manager.dict()

        p = multiprocessing.Process(target=subprocess_worker, args=(instance, file_path, return_dict))
        # only has 10 seconds to run
        p.start(); p.join(timeout=10)
        if p.is_alive(): p.terminate(); p.join()
        p.close()
        res = return_dict['result']
        # res = instance.run(file_path)
    except Exception as e:
        chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
        # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 顺利完成,收尾
    res = str(res)
    if os.path.exists(res):
        chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res])
        new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot)
        chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot)
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
    else:
        chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新

"""
测试:
    裁剪图像,保留下半部分
    交换图像的蓝色通道和红色通道
    将图像转为灰度图像
    将csv文件转excel表格
"""
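For reference, the `make_module` / `init_module_instance` pair above round-trips generated code through an importable spec of the form `package.path.gpt_fn_xxx->ClassName`. A minimal sketch of that convention (the module path below is hypothetical, not a real file in the project):

```python
import importlib

def load_generated_class(spec: str):
    # spec looks like "crazy_functions.logs.gpt_fn_2024_01_01->TerminalFunction"
    module_path, class_name = spec.split('->')
    cls = getattr(importlib.import_module(module_path), class_name)
    return cls()  # instantiate the generated TerminalFunction

# instance = load_generated_class("some.module->TerminalFunction")  # hypothetical spec
# result = instance.run("input.png")
```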
crazy_functions/Langchain知识库.py (106 行, 普通文件, 新增)
@@ -0,0 +1,106 @@

from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything


@CatchException
def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
    plugin_kwargs   插件模型的参数,暂时没有用武之地
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    web_port        当前软件运行的端口号
    """
    history = []    # 清空历史,以免输入溢出

    # < --------------------读取参数--------------- >
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    kai_id = plugin_kwargs.get("advanced_arg", 'default')

    chatbot.append((f"向`{kai_id}`知识库中添加文件。", "[Local Message] 从一批文件(txt, md, tex)中读取数据构建知识库, 然后进行问答。"))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # resolve deps
    try:
        from zh_langchain import construct_vector_store
        from langchain.embeddings.huggingface import HuggingFaceEmbeddings
        from .crazy_utils import knowledge_archive_interface
    except Exception as e:
        chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        from .crazy_utils import try_install_deps
        try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
        yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
        return

    # < --------------------读取文件--------------- >
    file_manifest = []
    spl = ["txt", "doc", "docx", "email", "epub", "html", "json", "md", "msg", "pdf", "ppt", "pptx", "rtf"]
    for sp in spl:
        _, file_manifest_tmp, _ = get_files_from_everything(txt, type=f'.{sp}')
        file_manifest += file_manifest_tmp

    if len(file_manifest) == 0:
        chatbot.append(["没有找到任何可读取文件", "当前支持的格式包括: txt, md, docx, pptx, pdf, json等"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # < -------------------预热文本向量化模组--------------- >
    chatbot.append(['<br/>'.join(file_manifest), "正在预热文本向量化模组, 如果是第一次运行, 将消耗较长时间下载中文向量化模型..."])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    print('Checking Text2vec ...')
    from langchain.embeddings.huggingface import HuggingFaceEmbeddings
    with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
        HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")

    # < -------------------构建知识库--------------- >
    chatbot.append(['<br/>'.join(file_manifest), "正在构建知识库..."])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    print('Establishing knowledge archive ...')
    with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
        kai = knowledge_archive_interface()
        kai.feed_archive(file_manifest=file_manifest, id=kai_id)
    kai_files = kai.get_loaded_file()
    kai_files = '<br/>'.join(kai_files)
    # chatbot.append(['知识库构建成功', "正在将知识库存储至cookie中"])
    # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    # chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id()
    # chatbot._cookies['lock_plugin'] = 'crazy_functions.Langchain知识库->读取知识库作答'
    # chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"])
    chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新

@CatchException
def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port=-1):
    # resolve deps
    try:
        from zh_langchain import construct_vector_store
        from langchain.embeddings.huggingface import HuggingFaceEmbeddings
        from .crazy_utils import knowledge_archive_interface
    except Exception as e:
        chatbot.append(["依赖不足", "导入依赖失败。正在尝试自动安装,请查看终端的输出或耐心等待..."])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        from .crazy_utils import try_install_deps
        try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
        yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
        return

    # < ------------------- --------------- >
    kai = knowledge_archive_interface()

    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    kai_id = plugin_kwargs.get("advanced_arg", 'default')
    resp, prompt = kai.answer_with_archive_by_id(txt, kai_id)

    chatbot.append((txt, f'[知识库 {kai_id}] ' + prompt))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=prompt, inputs_show_user=txt,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
        sys_prompt=system_prompt
    )
    history.extend((prompt, gpt_say))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
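The file-collection loop above calls `get_files_from_everything` once per supported extension. A rough stand-in for that behaviour, assuming the helper works like a recursive glob (its internals are not shown on this page):

```python
import glob, os

def collect_files(folder, extensions=("txt", "md", "pdf", "docx")):
    # Gather every supported file under `folder`, one extension at a time,
    # mirroring the spl loop in 知识库问答 above.
    manifest = []
    for ext in extensions:
        manifest += glob.glob(os.path.join(folder, '**', f'*.{ext}'), recursive=True)
    return manifest
```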
@@ -81,8 +81,8 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
     # <-------- 多线程润色开始 ---------->
     if language == 'en':
         if mode == 'polish':
-            inputs_array = [r"Below is a section from an academic paper, polish this section to meet the academic standard, " +
-                            r"improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" +
+            inputs_array = ["Below is a section from an academic paper, polish this section to meet the academic standard, " +
+                            "improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" +
                             f"\n\n{frag}" for frag in pfg.sp_file_contents]
         else:
             inputs_array = [r"Below is a section from an academic paper, proofread this section." +
@@ -93,10 +93,10 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
         sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
     elif language == 'zh':
         if mode == 'polish':
-            inputs_array = [r"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
+            inputs_array = [f"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
                             f"\n\n{frag}" for frag in pfg.sp_file_contents]
         else:
-            inputs_array = [r"以下是一篇学术论文中的一段内容,请对这部分内容进行语法矫正。不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
+            inputs_array = [f"以下是一篇学术论文中的一段内容,请对这部分内容进行语法矫正。不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
                             f"\n\n{frag}" for frag in pfg.sp_file_contents]
         inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag]
         sys_prompt_array=["你是一位专业的中文学术论文作家。" for _ in range(n_split)]
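A note on this change: dropping the `r` prefix (or switching to an f-string) leaves `\section` and `\cite` depending on Python treating `\s` and `\c` as non-escape sequences, which it currently does, while the raw-string form makes the intent explicit and avoids SyntaxWarning on newer interpreters. A quick check:

```python
# "\s" and "\c" are not escape sequences, so both spellings yield the same text today
# (newer Python versions may emit a SyntaxWarning for the non-raw form).
print("\section \cite" == r"\section \cite")   # True
print("\n" == r"\n")                            # False: a real escape sequence differs
```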
@@ -1,4 +1,4 @@
-from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone, check_repeat_upload, map_file_to_sha256
+from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
 from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
 from functools import partial
 import glob, os, requests, time, json, tarfile
@@ -107,10 +107,6 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
         except ValueError:
             return False
 
-    if txt.startswith('https://arxiv.org/pdf/'):
-        arxiv_id = txt.split('/')[-1]   # 2402.14207v2.pdf
-        txt = arxiv_id.split('v')[0]    # 2402.14207
-
     if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID
         txt = 'https://arxiv.org/abs/' + txt.strip()
     if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID
@@ -125,7 +121,6 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
     time.sleep(1) # 刷新界面
 
     url_ = txt   # https://arxiv.org/abs/1707.06690
-
     if not txt.startswith('https://arxiv.org/abs/'):
         msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。"
         yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
@@ -443,57 +438,10 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
-
-    hash_tag = map_file_to_sha256(file_manifest[0])
-
-    # <-------------- check repeated pdf ------------->
-    chatbot.append([f"检查PDF是否被重复上传", "正在检查..."])
-    yield from update_ui(chatbot=chatbot, history=history)
-    repeat, project_folder = check_repeat_upload(file_manifest[0], hash_tag)
-
-    except_flag = False
-
-    if repeat:
-        yield from update_ui_lastest_msg(f"发现重复上传,请查收结果(压缩包)...", chatbot=chatbot, history=history)
-
-        try:
-            trans_html_file = [f for f in glob.glob(f'{project_folder}/**/*.trans.html', recursive=True)][0]
-            promote_file_to_downloadzone(trans_html_file, rename_file=None, chatbot=chatbot)
-
-            translate_pdf = [f for f in glob.glob(f'{project_folder}/**/merge_translate_zh.pdf', recursive=True)][0]
-            promote_file_to_downloadzone(translate_pdf, rename_file=None, chatbot=chatbot)
-
-            comparison_pdf = [f for f in glob.glob(f'{project_folder}/**/comparison.pdf', recursive=True)][0]
-            promote_file_to_downloadzone(comparison_pdf, rename_file=None, chatbot=chatbot)
-
-            zip_res = zip_result(project_folder)
-            promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
-
-            return True
-
-        except:
-            report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现重复上传,但是无法找到相关文件")
-            yield from update_ui(chatbot=chatbot, history=history)
-
-            chatbot.append([f"没有相关文件", '尝试重新翻译PDF...'])
-            yield from update_ui(chatbot=chatbot, history=history)
-
-            except_flag = True
-
-    elif not repeat or except_flag:
-        yield from update_ui_lastest_msg(f"未发现重复上传", chatbot=chatbot, history=history)
-
     # <-------------- convert pdf into tex ------------->
-    chatbot.append([f"解析项目: {txt}", "正在将PDF转换为tex项目,请耐心等待..."])
-    yield from update_ui(chatbot=chatbot, history=history)
     project_folder = pdf2tex_project(file_manifest[0])
-    if project_folder is None:
-        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"PDF转换为tex项目失败")
-        yield from update_ui(chatbot=chatbot, history=history)
-        return False
-
-    # <-------------- translate latex file into Chinese ------------->
+    # Translate English Latex to Chinese Latex, and compile it
-    yield from update_ui_lastest_msg("正在tex项目将翻译为中文...", chatbot=chatbot, history=history)
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
         report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
@@ -506,12 +454,6 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
     # <-------------- move latex project away from temp folder ------------->
     project_folder = move_project(project_folder)
-
-    # <-------------- set a hash tag for repeat-checking ------------->
-    with open(pj(project_folder, hash_tag + '.tag'), 'w') as f:
-        f.write(hash_tag)
-        f.close()
-
 
     # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
     if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
         yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
@@ -519,7 +461,6 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
                                       switch_prompt=_switch_prompt_)
 
     # <-------------- compile PDF ------------->
-    yield from update_ui_lastest_msg("正在将翻译好的项目tex项目编译为PDF...", chatbot=chatbot, history=history)
     success = yield from 编译Latex(chatbot, history, main_file_original='merge',
                                     main_file_modified='merge_translate_zh', mode='translate_zh',
                                     work_folder_original=project_folder, work_folder_modified=project_folder,
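The removed block relied on `map_file_to_sha256` and `check_repeat_upload` to recognize a PDF that had already been translated and to serve the cached result. A minimal sketch of the hashing side of that idea (the tag-file layout and helper names here are assumptions for illustration, not the project's exact implementation):

```python
import hashlib, glob, os

def file_sha256(path: str) -> str:
    # Hash the uploaded PDF so that identical re-uploads map to the same tag.
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest()

def find_previous_workfolder(tag: str, log_root: str):
    # Look for a work folder that wrote "<tag>.tag" on a previous run.
    hits = glob.glob(os.path.join(log_root, '**', tag + '.tag'), recursive=True)
    return os.path.dirname(hits[0]) if hits else None
```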
crazy_functions/Latex输出PDF结果.py (306 行, 普通文件, 新增)
@@ -0,0 +1,306 @@

from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
from functools import partial
import glob, os, requests, time
pj = os.path.join
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")

# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# 专业词汇声明  = 'If the term "agent" is used in this section, it should be translated to "智能体". '
def switch_prompt(pfg, mode, more_requirement):
    """
    Generate prompts and system prompts based on the mode for proofreading or translating.
    Args:
    - pfg: Proofreader or Translator instance.
    - mode: A string specifying the mode, either 'proofread' or 'translate_zh'.

    Returns:
    - inputs_array: A list of strings containing prompts for users to respond to.
    - sys_prompt_array: A list of strings containing prompts for system prompts.
    """
    n_split = len(pfg.sp_file_contents)
    if mode == 'proofread_en':
        inputs_array = [r"Below is a section from an academic paper, proofread this section." +
                        r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement +
                        r"Answer me only with the revised text:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
    elif mode == 'translate_zh':
        inputs_array = [r"Below is a section from an English academic paper, translate it into Chinese. " + more_requirement +
                        r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " +
                        r"Answer me only with the translated text:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        sys_prompt_array = ["You are a professional translator." for _ in range(n_split)]
    else:
        assert False, "未知指令"
    return inputs_array, sys_prompt_array

def desend_to_extracted_folder_if_exist(project_folder):
    """
    Descend into the extracted folder if it exists, otherwise return the original folder.

    Args:
    - project_folder: A string specifying the folder path.

    Returns:
    - A string specifying the path to the extracted folder, or the original folder if there is no extracted folder.
    """
    maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)]
    if len(maybe_dir) == 0: return project_folder
    if maybe_dir[0].endswith('.extract'): return maybe_dir[0]
    return project_folder

def move_project(project_folder, arxiv_id=None):
    """
    Create a new work folder and copy the project folder to it.

    Args:
    - project_folder: A string specifying the folder path of the project.

    Returns:
    - A string specifying the path to the new work folder.
    """
    import shutil, time
    time.sleep(2)   # avoid time string conflict
    if arxiv_id is not None:
        new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder')
    else:
        new_workfolder = f'{get_log_folder()}/{gen_time_str()}'
    try:
        shutil.rmtree(new_workfolder)
    except:
        pass

    # align subfolder if there is a folder wrapper
    items = glob.glob(pj(project_folder,'*'))
    items = [item for item in items if os.path.basename(item)!='__MACOSX']
    if len(glob.glob(pj(project_folder,'*.tex'))) == 0 and len(items) == 1:
        if os.path.isdir(items[0]): project_folder = items[0]

    shutil.copytree(src=project_folder, dst=new_workfolder)
    return new_workfolder

def arxiv_download(chatbot, history, txt, allow_cache=True):
    def check_cached_translation_pdf(arxiv_id):
        translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation')
        if not os.path.exists(translation_dir):
            os.makedirs(translation_dir)
        target_file = pj(translation_dir, 'translate_zh.pdf')
        if os.path.exists(target_file):
            promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot)
            target_file_compare = pj(translation_dir, 'comparison.pdf')
            if os.path.exists(target_file_compare):
                promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot)
            return target_file
        return False
    def is_float(s):
        try:
            float(s)
            return True
        except ValueError:
            return False
    if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID
        txt = 'https://arxiv.org/abs/' + txt.strip()
    if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID
        txt = 'https://arxiv.org/abs/' + txt[:10]
    if not txt.startswith('https://arxiv.org'):
        return txt, None

    # <-------------- inspect format ------------->
    chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
    yield from update_ui(chatbot=chatbot, history=history)
    time.sleep(1) # 刷新界面

    url_ = txt   # https://arxiv.org/abs/1707.06690
    if not txt.startswith('https://arxiv.org/abs/'):
        msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。"
        yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
        return msg, None
    # <-------------- set format ------------->
    arxiv_id = url_.split('/abs/')[-1]
    if 'v' in arxiv_id: arxiv_id = arxiv_id[:10]
    cached_translation_pdf = check_cached_translation_pdf(arxiv_id)
    if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id

    url_tar = url_.replace('/abs/', '/e-print/')
    translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print')
    extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract')
    os.makedirs(translation_dir, exist_ok=True)

    # <-------------- download arxiv source file ------------->
    dst = pj(translation_dir, arxiv_id+'.tar')
    if os.path.exists(dst):
        yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面
    else:
        yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面
        proxies = get_conf('proxies')
        r = requests.get(url_tar, proxies=proxies)
        with open(dst, 'wb+') as f:
            f.write(r.content)
    # <-------------- extract file ------------->
    yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history) # 刷新界面
    from toolbox import extract_archive
    extract_archive(file_path=dst, dest_dir=extract_dst)
    return extract_dst, arxiv_id
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=


@CatchException
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # <-------------- information about this plugin ------------->
    chatbot.append([ "函数插件功能?",
                     "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([ f"解析项目: {txt}",
                         f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return


    # <-------------- clear history and read input ------------->
    history = []
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return


    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)


    # <-------------- move latex project away from temp folder ------------->
    project_folder = move_project(project_folder, arxiv_id=None)


    # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_proofread_en.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                       chatbot, history, system_prompt, mode='proofread_en', switch_prompt=_switch_prompt_)


    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_proofread_en',
                                   work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder)


    # <-------------- zip PDF ------------->
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
    else:
        chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success

# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

@CatchException
def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # <-------------- information about this plugin ------------->
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    no_cache = more_req.startswith("--no-cache")
    if no_cache: more_req.lstrip("--no-cache")
    allow_cache = not no_cache
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([ f"解析项目: {txt}",
                         f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return


    # <-------------- clear history and read input ------------->
    history = []
    txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
    if txt.endswith('.pdf'):
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return


    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return


    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)


    # <-------------- move latex project away from temp folder ------------->
    project_folder = move_project(project_folder, arxiv_id)


    # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                       chatbot, history, system_prompt, mode='translate_zh', switch_prompt=_switch_prompt_)


    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_translate_zh', mode='translate_zh',
                                   work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder)

    # <-------------- zip PDF ------------->
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
    else:
        chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
        yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)


    # <-------------- we are done ------------->
    return success
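One detail worth noting in `Latex翻译中文并重新编译PDF`: `more_req.lstrip("--no-cache")` returns a new string and discards it, so the flag text is still passed into the prompt even when `no_cache` is detected. A small sketch of the likely intent (the reassignment is the fix; the exact stripping rule is an assumption):

```python
more_req = "--no-cache please keep terminology consistent"
no_cache = more_req.startswith("--no-cache")
if no_cache:
    # str.lstrip returns a new string; assigning it back is what actually removes the flag.
    more_req = more_req[len("--no-cache"):].lstrip()
allow_cache = not no_cache
```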
@@ -0,0 +1,231 @@

"""
这是什么?
    这个文件用于函数插件的单元测试
    运行方法 python crazy_functions/crazy_functions_test.py
"""

# ==============================================================================================================================

def validate_path():
    import os, sys
    dir_name = os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)
validate_path() # validate path so you can run from base directory

# ==============================================================================================================================

from colorful import *
from toolbox import get_conf, ChatBotWithCookies
import contextlib
import os
import sys
from functools import wraps
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')

llm_kwargs = {
    'api_key': API_KEY,
    'llm_model': LLM_MODEL,
    'top_p':1.0,
    'max_length': None,
    'temperature':1.0,
}
plugin_kwargs = { }
chatbot = ChatBotWithCookies(llm_kwargs)
history = []
system_prompt = "Serve me as a writing and programming assistant."
web_port = 1024

# ==============================================================================================================================

def silence_stdout(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        _original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        for q in func(*args, **kwargs):
            sys.stdout = _original_stdout
            yield q
            sys.stdout = open(os.devnull, 'w')
        sys.stdout.close()
        sys.stdout = _original_stdout
    return wrapper

class CLI_Printer():
    def __init__(self) -> None:
        self.pre_buf = ""

    def print(self, buf):
        bufp = ""
        for index, chat in enumerate(buf):
            a, b = chat
            bufp += sprint亮靛('[Me]:' + a) + '\n'
            bufp += '[GPT]:' + b
            if index < len(buf)-1:
                bufp += '\n'

        if self.pre_buf!="" and bufp.startswith(self.pre_buf):
            print(bufp[len(self.pre_buf):], end='')
        else:
            print('\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'+bufp, end='')
        self.pre_buf = bufp
        return

cli_printer = CLI_Printer()
# ==============================================================================================================================
def test_解析一个Python项目():
    from crazy_functions.解析项目源代码 import 解析一个Python项目
    txt = "crazy_functions/test_project/python/dqn"
    for cookies, cb, hist, msg in 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_解析一个Cpp项目():
    from crazy_functions.解析项目源代码 import 解析一个C项目
    txt = "crazy_functions/test_project/cpp/cppipc"
    for cookies, cb, hist, msg in 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_Latex英文润色():
    from crazy_functions.Latex全文润色 import Latex英文润色
    txt = "crazy_functions/test_project/latex/attention"
    for cookies, cb, hist, msg in Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_Markdown中译英():
    from crazy_functions.批量Markdown翻译 import Markdown中译英
    txt = "README.md"
    for cookies, cb, hist, msg in Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_批量翻译PDF文档():
    from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
    txt = "crazy_functions/test_project/pdf_and_word"
    for cookies, cb, hist, msg in 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_谷歌检索小助手():
    from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
    txt = "https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG="
    for cookies, cb, hist, msg in 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_总结word文档():
    from crazy_functions.总结word文档 import 总结word文档
    txt = "crazy_functions/test_project/pdf_and_word"
    for cookies, cb, hist, msg in 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_下载arxiv论文并翻译摘要():
    from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
    txt = "1812.10695"
    for cookies, cb, hist, msg in 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_联网回答问题():
    from crazy_functions.联网的ChatGPT import 连接网络回答问题
    # txt = "谁是应急食品?"
    # >> '根据以上搜索结果可以得知,应急食品是“原神”游戏中的角色派蒙的外号。'
    # txt = "道路千万条,安全第一条。后面两句是?"
    # >> '行车不规范,亲人两行泪。'
    # txt = "You should have gone for the head. What does that mean?"
    # >> The phrase "You should have gone for the head" is a quote from the Marvel movies, Avengers: Infinity War and Avengers: Endgame. It was spoken by the character Thanos in Infinity War and by Thor in Endgame.
    txt = "AutoGPT是什么?"
    for cookies, cb, hist, msg in 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print("当前问答:", cb[-1][-1].replace("\n"," "))
    for i, it in enumerate(cb): print亮蓝(it[0]); print亮黄(it[1])

def test_解析ipynb文件():
    from crazy_functions.解析JupyterNotebook import 解析ipynb文件
    txt = "crazy_functions/test_samples"
    for cookies, cb, hist, msg in 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)


def test_数学动画生成manim():
    from crazy_functions.数学动画生成manim import 动画生成
    txt = "A ball split into 2, and then split into 4, and finally split into 8."
    for cookies, cb, hist, msg in 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)


def test_Markdown多语言():
    from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
    txt = "README.md"
    history = []
    for lang in ["English", "French", "Japanese", "Korean", "Russian", "Italian", "German", "Portuguese", "Arabic"]:
        plugin_kwargs = {"advanced_arg": lang}
        for cookies, cb, hist, msg in Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
            print(cb)

def test_Langchain知识库():
    from crazy_functions.Langchain知识库 import 知识库问答
    txt = "./"
    chatbot = ChatBotWithCookies(llm_kwargs)
    for cookies, cb, hist, msg in silence_stdout(知识库问答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb) # print(cb)

    chatbot = ChatBotWithCookies(cookies)
    from crazy_functions.Langchain知识库 import 读取知识库作答
    txt = "What is the installation method?"
    for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb) # print(cb)

def test_Langchain知识库读取():
    from crazy_functions.Langchain知识库 import 读取知识库作答
    txt = "远程云服务器部署?"
    for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb) # print(cb)

def test_Latex():
    from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比, Latex翻译中文并重新编译PDF

    # txt = r"https://arxiv.org/abs/1706.03762"
    # txt = r"https://arxiv.org/abs/1902.03185"
    # txt = r"https://arxiv.org/abs/2305.18290"
    # txt = r"https://arxiv.org/abs/2305.17608"
    # txt = r"https://arxiv.org/abs/2211.16068" # ACE
    # txt = r"C:\Users\x\arxiv_cache\2211.16068\workfolder" # ACE
    # txt = r"https://arxiv.org/abs/2002.09253"
    # txt = r"https://arxiv.org/abs/2306.07831"
    # txt = r"https://arxiv.org/abs/2212.10156"
    # txt = r"https://arxiv.org/abs/2211.11559"
    # txt = r"https://arxiv.org/abs/2303.08774"
    txt = r"https://arxiv.org/abs/2303.12712"
    # txt = r"C:\Users\fuqingxu\arxiv_cache\2303.12712\workfolder"


    for cookies, cb, hist, msg in (Latex翻译中文并重新编译PDF)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        cli_printer.print(cb) # print(cb)


    # txt = "2302.02948.tar"
    # print(txt)
    # main_tex, work_folder = Latex预处理(txt)
    # print('main tex:', main_tex)
    # res = 编译Latex(main_tex, work_folder)
    # # for cookies, cb, hist, msg in silence_stdout(编译Latex)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    #     cli_printer.print(cb) # print(cb)


# test_解析一个Python项目()
# test_Latex英文润色()
# test_Markdown中译英()
# test_批量翻译PDF文档()
# test_谷歌检索小助手()
# test_总结word文档()
# test_下载arxiv论文并翻译摘要()
# test_解析一个Cpp项目()
# test_联网回答问题()
# test_解析ipynb文件()
# test_数学动画生成manim()
# test_Langchain知识库()
# test_Langchain知识库读取()
if __name__ == "__main__":
    test_Latex()
    input("程序完成,回车退出。")
    print("退出。")
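The `silence_stdout` decorator above swaps `sys.stdout` for `os.devnull` while the wrapped plugin generator runs, restoring it only around each `yield`, so noisy progress prints inside the plugin are suppressed and only the yielded updates reach the terminal. A small usage sketch with a hypothetical plugin generator:

```python
def chatty_plugin():
    # stand-in for a plugin generator that prints noisy progress logs
    print("loading model ...")     # suppressed by silence_stdout
    yield "step 1 done"
    print("cleaning up ...")       # suppressed as well
    yield "step 2 done"

for update in silence_stdout(chatty_plugin)():
    print(update)                  # only the yielded updates are printed
```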
@@ -135,11 +135,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
             yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
     return final_result
 
-def can_multi_process(llm) -> bool:
-    from request_llms.bridge_all import model_info
-
-    def default_condition(llm) -> bool:
-        # legacy condition
-        if llm.startswith('gpt-'): return True
-        if llm.startswith('api2d-'): return True
-        if llm.startswith('azure-'): return True
+def can_multi_process(llm):
+    if llm.startswith('gpt-'): return True
+    if llm.startswith('api2d-'): return True
+    if llm.startswith('azure-'): return True
@@ -147,14 +143,6 @@ def can_multi_process(llm) -> bool:
     if llm.startswith('zhipuai') or llm.startswith('glm-'): return True
     return False
-
-    if llm in model_info:
-        if 'can_multi_thread' in model_info[llm]:
-            return model_info[llm]['can_multi_thread']
-        else:
-            return default_condition(llm)
-    else:
-        return default_condition(llm)
 
 def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
         inputs_array, inputs_show_user_array, llm_kwargs,
         chatbot, history_array, sys_prompt_array,
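The removed branch consulted the `model_info` registry for a per-model `can_multi_thread` flag before falling back to name-prefix checks; the new version keeps only the prefix checks. A compact sketch of the registry-based pattern that was dropped (assuming `model_info` maps model names to capability dicts, as the removed code suggests):

```python
def can_multi_process_with_registry(llm: str, model_info: dict) -> bool:
    # Prefer an explicit per-model capability flag, fall back to legacy name prefixes.
    entry = model_info.get(llm, {})
    if 'can_multi_thread' in entry:
        return entry['can_multi_thread']
    return llm.startswith(('gpt-', 'api2d-', 'azure-', 'zhipuai', 'glm-'))
```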
@@ -568,7 +556,7 @@ class nougat_interface():
         from toolbox import ProxyNetworkActivate
         logging.info(f'正在执行命令 {command}')
         with ProxyNetworkActivate("Nougat_Download"):
-            process = subprocess.Popen(command, shell=False, cwd=cwd, env=os.environ)
+            process = subprocess.Popen(command, shell=True, cwd=cwd, env=os.environ)
         try:
             stdout, stderr = process.communicate(timeout=timeout)
         except subprocess.TimeoutExpired:
@@ -592,8 +580,7 @@ class nougat_interface():
 
         yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
                                          chatbot=chatbot, history=history, delay=0)
-        command = ['nougat', '--out', os.path.abspath(dst), os.path.abspath(fp)]
-        self.nougat_with_timeout(command, cwd=os.getcwd(), timeout=3600)
+        self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600)
         res = glob.glob(os.path.join(dst,'*.mmd'))
         if len(res) == 0:
             self.threadLock.release()
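This hunk swaps an argument list with `shell=False` for a single command string with `shell=True`. With `shell=True` the whole line is interpreted by the shell, so the paths must be quoted; the list form bypasses the shell and passes each argument verbatim. A side-by-side sketch of the two invocation styles (the paths are illustrative):

```python
import subprocess, os

fp, dst = "paper.pdf", "out_dir"
# list form: no shell, arguments passed as-is (robust to spaces and shell metacharacters)
subprocess.Popen(['nougat', '--out', os.path.abspath(dst), os.path.abspath(fp)], shell=False)
# string form: the line goes through the shell, so the paths need explicit quoting
subprocess.Popen(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', shell=True)
```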
@@ -62,8 +62,8 @@ class GptJsonIO():
         if "type" in reduced_schema:
             del reduced_schema["type"]
         # Ensure json in context is well-formed with double quotes.
-        schema_str = json.dumps(reduced_schema)
         if self.example_instruction:
+            schema_str = json.dumps(reduced_schema)
             return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
         else:
             return PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE.format(schema=schema_str)
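After this move, `schema_str` is only assigned inside the `if self.example_instruction:` branch, yet the `else` branch still references it, so that path would raise `NameError`. If both branches need the serialized schema, the assignment belongs before the conditional. A minimal sketch of that shape (not the library's actual code):

```python
import json

def format_instructions(reduced_schema, example_instruction, fmt, fmt_simple):
    # Serialize once so that both branches can use the same schema string.
    schema_str = json.dumps(reduced_schema)
    if example_instruction:
        return fmt.format(schema=schema_str)
    return fmt_simple.format(schema=schema_str)
```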
@@ -1,11 +1,10 @@
 from toolbox import update_ui, update_ui_lastest_msg, get_log_folder
-from toolbox import get_conf, promote_file_to_downloadzone
+from toolbox import get_conf, objdump, objload, promote_file_to_downloadzone
 from .latex_toolbox import PRESERVE, TRANSFORM
 from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
 from .latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process
 from .latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout
 from .latex_toolbox import find_title_and_abs
-from .latex_pickle_io import objdump, objload
 
 import os, shutil
 import re
@@ -1,38 +0,0 @@
import pickle


class SafeUnpickler(pickle.Unpickler):

    def get_safe_classes(self):
        from .latex_actions import LatexPaperFileGroup, LatexPaperSplit
        # 定义允许的安全类
        safe_classes = {
            # 在这里添加其他安全的类
            'LatexPaperFileGroup': LatexPaperFileGroup,
            'LatexPaperSplit' : LatexPaperSplit,
        }
        return safe_classes

    def find_class(self, module, name):
        # 只允许特定的类进行反序列化
        self.safe_classes = self.get_safe_classes()
        if f'{module}.{name}' in self.safe_classes:
            return self.safe_classes[f'{module}.{name}']
        # 如果尝试加载未授权的类,则抛出异常
        raise pickle.UnpicklingError(f"Attempted to deserialize unauthorized class '{name}' from module '{module}'")

def objdump(obj, file="objdump.tmp"):

    with open(file, "wb+") as f:
        pickle.dump(obj, f)
    return


def objload(file="objdump.tmp"):
    import os

    if not os.path.exists(file):
        return
    with open(file, "rb") as f:
        unpickler = SafeUnpickler(f)
        return unpickler.load()
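The removed module above whitelists the classes that pickle may reconstruct, so a tampered cache file cannot instantiate arbitrary objects on load. A minimal self-contained sketch of the same pattern (class names here are illustrative, not the project's):

    import io, pickle, collections

    class WhitelistUnpickler(pickle.Unpickler):
        ALLOWED = {("builtins", "set")}  # only these (module, name) pairs may be loaded

        def find_class(self, module, name):
            if (module, name) in self.ALLOWED:
                return super().find_class(module, name)
            raise pickle.UnpicklingError(f"unauthorized class {module}.{name}")

    ok = pickle.dumps({1, 2, 3})
    print(WhitelistUnpickler(io.BytesIO(ok)).load())      # {1, 2, 3}

    bad = pickle.dumps(collections.OrderedDict())
    try:
        WhitelistUnpickler(io.BytesIO(bad)).load()
    except pickle.UnpicklingError as err:
        print("blocked:", err)                            # unauthorized class collections.OrderedDict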
788  crazy_functions/latex_utils.py  (regular file)
@@ -0,0 +1,788 @@
|
|||||||
|
from toolbox import update_ui, update_ui_lastest_msg # 刷新Gradio前端界面
|
||||||
|
from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone
|
||||||
|
import os, shutil
|
||||||
|
import re
|
||||||
|
import numpy as np
|
||||||
|
pj = os.path.join
|
||||||
|
|
||||||
|
"""
|
||||||
|
========================================================================
|
||||||
|
Part One
|
||||||
|
Latex segmentation with a binary mask (PRESERVE=0, TRANSFORM=1)
|
||||||
|
========================================================================
|
||||||
|
"""
|
||||||
|
PRESERVE = 0
|
||||||
|
TRANSFORM = 1
|
||||||
|
|
||||||
|
def set_forbidden_text(text, mask, pattern, flags=0):
|
||||||
|
"""
|
||||||
|
Add a preserve text area in this paper
|
||||||
|
e.g. with pattern = r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}"
|
||||||
|
you can mask out (mask = PRESERVE so that text become untouchable for GPT)
|
||||||
|
everything between "\begin{equation}" and "\end{equation}"
|
||||||
|
"""
|
||||||
|
if isinstance(pattern, list): pattern = '|'.join(pattern)
|
||||||
|
pattern_compile = re.compile(pattern, flags)
|
||||||
|
for res in pattern_compile.finditer(text):
|
||||||
|
mask[res.span()[0]:res.span()[1]] = PRESERVE
|
||||||
|
return text, mask
|
||||||
|
|
||||||
|
def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
|
||||||
|
"""
|
||||||
|
Move area out of preserve area (make text editable for GPT)
|
||||||
|
count the number of the braces so as to catch compelete text area.
|
||||||
|
e.g.
|
||||||
|
\begin{abstract} blablablablablabla. \end{abstract}
|
||||||
|
"""
|
||||||
|
if isinstance(pattern, list): pattern = '|'.join(pattern)
|
||||||
|
pattern_compile = re.compile(pattern, flags)
|
||||||
|
for res in pattern_compile.finditer(text):
|
||||||
|
if not forbid_wrapper:
|
||||||
|
mask[res.span()[0]:res.span()[1]] = TRANSFORM
|
||||||
|
else:
|
||||||
|
mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE # '\\begin{abstract}'
|
||||||
|
mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM # abstract
|
||||||
|
mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE # abstract
|
||||||
|
return text, mask
|
||||||
|
|
||||||
|
def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
|
||||||
|
"""
|
||||||
|
Add a preserve text area in this paper (text become untouchable for GPT).
|
||||||
|
count the number of the braces so as to catch compelete text area.
|
||||||
|
e.g.
|
||||||
|
\caption{blablablablabla\texbf{blablabla}blablabla.}
|
||||||
|
"""
|
||||||
|
pattern_compile = re.compile(pattern, flags)
|
||||||
|
for res in pattern_compile.finditer(text):
|
||||||
|
brace_level = -1
|
||||||
|
p = begin = end = res.regs[0][0]
|
||||||
|
for _ in range(1024*16):
|
||||||
|
if text[p] == '}' and brace_level == 0: break
|
||||||
|
elif text[p] == '}': brace_level -= 1
|
||||||
|
elif text[p] == '{': brace_level += 1
|
||||||
|
p += 1
|
||||||
|
end = p+1
|
||||||
|
mask[begin:end] = PRESERVE
|
||||||
|
return text, mask
|
||||||
|
|
||||||
|
def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wrapper=True):
|
||||||
|
"""
|
||||||
|
Move area out of preserve area (make text editable for GPT)
|
||||||
|
count the number of the braces so as to catch compelete text area.
|
||||||
|
e.g.
|
||||||
|
\caption{blablablablabla\texbf{blablabla}blablabla.}
|
||||||
|
"""
|
||||||
|
pattern_compile = re.compile(pattern, flags)
|
||||||
|
for res in pattern_compile.finditer(text):
|
||||||
|
brace_level = 0
|
||||||
|
p = begin = end = res.regs[1][0]
|
||||||
|
for _ in range(1024*16):
|
||||||
|
if text[p] == '}' and brace_level == 0: break
|
||||||
|
elif text[p] == '}': brace_level -= 1
|
||||||
|
elif text[p] == '{': brace_level += 1
|
||||||
|
p += 1
|
||||||
|
end = p
|
||||||
|
mask[begin:end] = TRANSFORM
|
||||||
|
if forbid_wrapper:
|
||||||
|
mask[res.regs[0][0]:begin] = PRESERVE
|
||||||
|
mask[end:res.regs[0][1]] = PRESERVE
|
||||||
|
return text, mask
|
||||||
|
|
||||||
|
def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
|
||||||
|
"""
|
||||||
|
Find all \begin{} ... \end{} text block that with less than limit_n_lines lines.
|
||||||
|
Add it to preserve area
|
||||||
|
"""
|
||||||
|
pattern_compile = re.compile(pattern, flags)
|
||||||
|
def search_with_line_limit(text, mask):
|
||||||
|
for res in pattern_compile.finditer(text):
|
||||||
|
cmd = res.group(1) # begin{what}
|
||||||
|
this = res.group(2) # content between begin and end
|
||||||
|
this_mask = mask[res.regs[2][0]:res.regs[2][1]]
|
||||||
|
white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof',
|
||||||
|
'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate']
|
||||||
|
if (cmd in white_list) or this.count('\n') >= limit_n_lines: # use a magical number 42
|
||||||
|
this, this_mask = search_with_line_limit(this, this_mask)
|
||||||
|
mask[res.regs[2][0]:res.regs[2][1]] = this_mask
|
||||||
|
else:
|
||||||
|
mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE
|
||||||
|
return text, mask
|
||||||
|
return search_with_line_limit(text, mask)
|
||||||
|
|
||||||
|
class LinkedListNode():
|
||||||
|
"""
|
||||||
|
Linked List Node
|
||||||
|
"""
|
||||||
|
def __init__(self, string, preserve=True) -> None:
|
||||||
|
self.string = string
|
||||||
|
self.preserve = preserve
|
||||||
|
self.next = None
|
||||||
|
# self.begin_line = 0
|
||||||
|
# self.begin_char = 0
|
||||||
|
|
||||||
|
def convert_to_linklist(text, mask):
|
||||||
|
root = LinkedListNode("", preserve=True)
|
||||||
|
current_node = root
|
||||||
|
for c, m, i in zip(text, mask, range(len(text))):
|
||||||
|
if (m==PRESERVE and current_node.preserve) \
|
||||||
|
or (m==TRANSFORM and not current_node.preserve):
|
||||||
|
# add
|
||||||
|
current_node.string += c
|
||||||
|
else:
|
||||||
|
current_node.next = LinkedListNode(c, preserve=(m==PRESERVE))
|
||||||
|
current_node = current_node.next
|
||||||
|
return root
|
||||||
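A standalone sketch of the masking idea implemented above (simplified, and independent of the helpers in this file): every character starts as TRANSFORM, regex hits flip regions to PRESERVE, and equal-flag runs are then folded into segments much like convert_to_linklist builds its node list.

    import re
    import numpy as np

    PRESERVE, TRANSFORM = 0, 1
    tex = r"Intro text. \begin{equation} E=mc^2 \end{equation} More prose."

    mask = np.full(len(tex), TRANSFORM, dtype=np.uint8)
    for m in re.finditer(r"\\begin\{equation\}.*?\\end\{equation\}", tex, re.DOTALL):
        mask[m.start():m.end()] = PRESERVE                 # freeze the equation

    segments, start = [], 0                                # fold equal-flag runs
    for i in range(1, len(tex) + 1):
        if i == len(tex) or mask[i] != mask[start]:
            segments.append((int(mask[start]), tex[start:i]))
            start = i
    print(segments)
    # [(1, 'Intro text. '), (0, '\\begin{equation} E=mc^2 \\end{equation}'), (1, ' More prose.')]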
|
"""
|
||||||
|
========================================================================
|
||||||
|
Latex Merge File
|
||||||
|
========================================================================
|
||||||
|
"""
|
||||||
|
|
||||||
|
def 寻找Latex主文件(file_manifest, mode):
|
||||||
|
"""
|
||||||
|
在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
|
||||||
|
P.S. 但愿没人把latex模板放在里面传进来 (6.25 加入判定latex模板的代码)
|
||||||
|
"""
|
||||||
|
canidates = []
|
||||||
|
for texf in file_manifest:
|
||||||
|
if os.path.basename(texf).startswith('merge'):
|
||||||
|
continue
|
||||||
|
with open(texf, 'r', encoding='utf8') as f:
|
||||||
|
file_content = f.read()
|
||||||
|
if r'\documentclass' in file_content:
|
||||||
|
canidates.append(texf)
|
||||||
|
else:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if len(canidates) == 0:
|
||||||
|
raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)')
|
||||||
|
elif len(canidates) == 1:
|
||||||
|
return canidates[0]
|
||||||
|
else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
|
||||||
|
canidates_score = []
|
||||||
|
# 给出一些判定模板文档的词作为扣分项
|
||||||
|
unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
|
||||||
|
expected_words = ['\input', '\ref', '\cite']
|
||||||
|
for texf in canidates:
|
||||||
|
canidates_score.append(0)
|
||||||
|
with open(texf, 'r', encoding='utf8') as f:
|
||||||
|
file_content = f.read()
|
||||||
|
for uw in unexpected_words:
|
||||||
|
if uw in file_content:
|
||||||
|
canidates_score[-1] -= 1
|
||||||
|
for uw in expected_words:
|
||||||
|
if uw in file_content:
|
||||||
|
canidates_score[-1] += 1
|
||||||
|
select = np.argmax(canidates_score) # 取评分最高者返回
|
||||||
|
return canidates[select]
|
||||||
|
|
||||||
|
def rm_comments(main_file):
|
||||||
|
new_file_remove_comment_lines = []
|
||||||
|
for l in main_file.splitlines():
|
||||||
|
# 删除整行的空注释
|
||||||
|
if l.lstrip().startswith("%"):
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
new_file_remove_comment_lines.append(l)
|
||||||
|
main_file = '\n'.join(new_file_remove_comment_lines)
|
||||||
|
# main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file) # 将 \include 命令转换为 \input 命令
|
||||||
|
main_file = re.sub(r'(?<!\\)%.*', '', main_file) # 使用正则表达式查找半行注释, 并替换为空字符串
|
||||||
|
return main_file
|
||||||
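The half-line comment removal above hinges on the negative look-behind (?<!\\)%, which spares escaped percent signs while stripping real comments. A quick standalone check of that behaviour:

    import re

    line = r"accuracy rises by 5\% here  % TODO: cite the source"
    print(re.sub(r'(?<!\\)%.*', '', line))   # -> accuracy rises by 5\% here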
|
|
||||||
|
def merge_tex_files_(project_foler, main_file, mode):
|
||||||
|
"""
|
||||||
|
Merge Tex project recrusively
|
||||||
|
"""
|
||||||
|
main_file = rm_comments(main_file)
|
||||||
|
for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
|
||||||
|
f = s.group(1)
|
||||||
|
fp = os.path.join(project_foler, f)
|
||||||
|
if os.path.exists(fp):
|
||||||
|
# e.g., \input{srcs/07_appendix.tex}
|
||||||
|
with open(fp, 'r', encoding='utf-8', errors='replace') as fx:
|
||||||
|
c = fx.read()
|
||||||
|
else:
|
||||||
|
# e.g., \input{srcs/07_appendix}
|
||||||
|
with open(fp+'.tex', 'r', encoding='utf-8', errors='replace') as fx:
|
||||||
|
c = fx.read()
|
||||||
|
c = merge_tex_files_(project_foler, c, mode)
|
||||||
|
main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
|
||||||
|
return main_file
|
||||||
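merge_tex_files_ above expands \input{...} references from back to front so the earlier match spans stay valid while the string grows. A minimal standalone sketch of the same expansion, reading from an in-memory dict instead of the project folder (the file names are made up for the demo):

    import re

    files = {
        "main.tex":  r"\documentclass{article} \input{intro} \input{body.tex}",
        "intro.tex": "Hello.",
        "body.tex":  r"Body with \input{intro} again.",
    }

    def expand_inputs(text):
        # iterate matches in reverse so spans before each splice remain correct
        for m in reversed(list(re.finditer(r"\\input\{(.*?)\}", text))):
            name = m.group(1)
            content = files.get(name) or files.get(name + ".tex", "")
            text = text[:m.start()] + expand_inputs(content) + text[m.end():]
        return text

    print(expand_inputs(files["main.tex"]))
    # \documentclass{article} Hello. Body with Hello. again.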
|
|
||||||
|
def merge_tex_files(project_foler, main_file, mode):
|
||||||
|
"""
|
||||||
|
Merge Tex project recrusively
|
||||||
|
P.S. 顺便把CTEX塞进去以支持中文
|
||||||
|
P.S. 顺便把Latex的注释去除
|
||||||
|
"""
|
||||||
|
main_file = merge_tex_files_(project_foler, main_file, mode)
|
||||||
|
main_file = rm_comments(main_file)
|
||||||
|
|
||||||
|
if mode == 'translate_zh':
|
||||||
|
# find paper documentclass
|
||||||
|
pattern = re.compile(r'\\documentclass.*\n')
|
||||||
|
match = pattern.search(main_file)
|
||||||
|
assert match is not None, "Cannot find documentclass statement!"
|
||||||
|
position = match.end()
|
||||||
|
add_ctex = '\\usepackage{ctex}\n'
|
||||||
|
add_url = '\\usepackage{url}\n' if '{url}' not in main_file else ''
|
||||||
|
main_file = main_file[:position] + add_ctex + add_url + main_file[position:]
|
||||||
|
# fontset=windows
|
||||||
|
import platform
|
||||||
|
main_file = re.sub(r"\\documentclass\[(.*?)\]{(.*?)}", r"\\documentclass[\1,fontset=windows,UTF8]{\2}",main_file)
|
||||||
|
main_file = re.sub(r"\\documentclass{(.*?)}", r"\\documentclass[fontset=windows,UTF8]{\1}",main_file)
|
||||||
|
# find paper abstract
|
||||||
|
pattern_opt1 = re.compile(r'\\begin\{abstract\}.*\n')
|
||||||
|
pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
|
||||||
|
match_opt1 = pattern_opt1.search(main_file)
|
||||||
|
match_opt2 = pattern_opt2.search(main_file)
|
||||||
|
assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
|
||||||
|
return main_file
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
========================================================================
|
||||||
|
Post process
|
||||||
|
========================================================================
|
||||||
|
"""
|
||||||
|
def mod_inbraket(match):
|
||||||
|
"""
|
||||||
|
为啥chatgpt会把cite里面的逗号换成中文逗号呀
|
||||||
|
"""
|
||||||
|
# get the matched string
|
||||||
|
cmd = match.group(1)
|
||||||
|
str_to_modify = match.group(2)
|
||||||
|
# modify the matched string
|
||||||
|
str_to_modify = str_to_modify.replace(':', ':') # 前面是中文冒号,后面是英文冒号
|
||||||
|
str_to_modify = str_to_modify.replace(',', ',') # 前面是中文逗号,后面是英文逗号
|
||||||
|
# str_to_modify = 'BOOM'
|
||||||
|
return "\\" + cmd + "{" + str_to_modify + "}"
|
||||||
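mod_inbraket above is meant as a re.sub callback: inside commands such as \cite{...} it turns the full-width ',' and ':' that the model sometimes emits back into ASCII punctuation. A standalone rerun of that substitution (the input string is fabricated):

    import re

    def back_to_ascii(match):
        cmd, body = match.group(1), match.group(2)
        body = body.replace(':', ':').replace(',', ',')   # full-width -> ASCII
        return "\\" + cmd + "{" + body + "}"

    broken = "see \\cite{smith2020,doe2021} and \\ref{fig:overview}"
    print(re.sub(r"\\([a-z]{2,10})\{([^\}]*?)\}", back_to_ascii, broken))
    # see \cite{smith2020,doe2021} and \ref{fig:overview}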
|
|
||||||
|
def fix_content(final_tex, node_string):
|
||||||
|
"""
|
||||||
|
Fix common GPT errors to increase success rate
|
||||||
|
"""
|
||||||
|
final_tex = re.sub(r"(?<!\\)%", "\\%", final_tex)
|
||||||
|
final_tex = re.sub(r"\\([a-z]{2,10})\ \{", r"\\\1{", string=final_tex)
|
||||||
|
final_tex = re.sub(r"\\\ ([a-z]{2,10})\{", r"\\\1{", string=final_tex)
|
||||||
|
final_tex = re.sub(r"\\([a-z]{2,10})\{([^\}]*?)\}", mod_inbraket, string=final_tex)
|
||||||
|
|
||||||
|
if "Traceback" in final_tex and "[Local Message]" in final_tex:
|
||||||
|
final_tex = node_string # 出问题了,还原原文
|
||||||
|
if node_string.count('\\begin') != final_tex.count('\\begin'):
|
||||||
|
final_tex = node_string # 出问题了,还原原文
|
||||||
|
if node_string.count('\_') > 0 and node_string.count('\_') > final_tex.count('\_'):
|
||||||
|
# walk and replace any _ without \
|
||||||
|
final_tex = re.sub(r"(?<!\\)_", "\\_", final_tex)
|
||||||
|
|
||||||
|
def compute_brace_level(string):
|
||||||
|
# this function count the number of { and }
|
||||||
|
brace_level = 0
|
||||||
|
for c in string:
|
||||||
|
if c == "{": brace_level += 1
|
||||||
|
elif c == "}": brace_level -= 1
|
||||||
|
return brace_level
|
||||||
|
def join_most(tex_t, tex_o):
|
||||||
|
# this function join translated string and original string when something goes wrong
|
||||||
|
p_t = 0
|
||||||
|
p_o = 0
|
||||||
|
def find_next(string, chars, begin):
|
||||||
|
p = begin
|
||||||
|
while p < len(string):
|
||||||
|
if string[p] in chars: return p, string[p]
|
||||||
|
p += 1
|
||||||
|
return None, None
|
||||||
|
while True:
|
||||||
|
res1, char = find_next(tex_o, ['{','}'], p_o)
|
||||||
|
if res1 is None: break
|
||||||
|
res2, char = find_next(tex_t, [char], p_t)
|
||||||
|
if res2 is None: break
|
||||||
|
p_o = res1 + 1
|
||||||
|
p_t = res2 + 1
|
||||||
|
return tex_t[:p_t] + tex_o[p_o:]
|
||||||
|
|
||||||
|
if compute_brace_level(final_tex) != compute_brace_level(node_string):
|
||||||
|
# 出问题了,还原部分原文,保证括号正确
|
||||||
|
final_tex = join_most(final_tex, node_string)
|
||||||
|
return final_tex
|
||||||
|
|
||||||
|
def split_subprocess(txt, project_folder, return_dict, opts):
|
||||||
|
"""
|
||||||
|
break down latex file to a linked list,
|
||||||
|
each node use a preserve flag to indicate whether it should
|
||||||
|
be proccessed by GPT.
|
||||||
|
"""
|
||||||
|
text = txt
|
||||||
|
mask = np.zeros(len(txt), dtype=np.uint8) + TRANSFORM
|
||||||
|
|
||||||
|
# 吸收title与作者以上的部分
|
||||||
|
text, mask = set_forbidden_text(text, mask, r"(.*?)\\maketitle", re.DOTALL)
|
||||||
|
# 吸收iffalse注释
|
||||||
|
text, mask = set_forbidden_text(text, mask, r"\\iffalse(.*?)\\fi", re.DOTALL)
|
||||||
|
# 吸收在42行以内的begin-end组合
|
||||||
|
text, mask = set_forbidden_text_begin_end(text, mask, r"\\begin\{([a-z\*]*)\}(.*?)\\end\{\1\}", re.DOTALL, limit_n_lines=42)
|
||||||
|
# 吸收匿名公式
|
||||||
|
text, mask = set_forbidden_text(text, mask, [ r"\$\$(.*?)\$\$", r"\\\[.*?\\\]" ], re.DOTALL)
|
||||||
|
# 吸收其他杂项
|
||||||
|
text, mask = set_forbidden_text(text, mask, [ r"\\section\{(.*?)\}", r"\\section\*\{(.*?)\}", r"\\subsection\{(.*?)\}", r"\\subsubsection\{(.*?)\}" ])
|
||||||
|
text, mask = set_forbidden_text(text, mask, [ r"\\bibliography\{(.*?)\}", r"\\bibliographystyle\{(.*?)\}" ])
|
||||||
|
text, mask = set_forbidden_text(text, mask, r"\\begin\{thebibliography\}.*?\\end\{thebibliography\}", re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, r"\\begin\{lstlisting\}(.*?)\\end\{lstlisting\}", re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, r"\\begin\{wraptable\}(.*?)\\end\{wraptable\}", re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}", re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, [r"\\begin\{wrapfigure\}(.*?)\\end\{wrapfigure\}", r"\\begin\{wrapfigure\*\}(.*?)\\end\{wrapfigure\*\}"], re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, [r"\\begin\{figure\}(.*?)\\end\{figure\}", r"\\begin\{figure\*\}(.*?)\\end\{figure\*\}"], re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, [r"\\begin\{multline\}(.*?)\\end\{multline\}", r"\\begin\{multline\*\}(.*?)\\end\{multline\*\}"], re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, [r"\\begin\{table\}(.*?)\\end\{table\}", r"\\begin\{table\*\}(.*?)\\end\{table\*\}"], re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, [r"\\begin\{minipage\}(.*?)\\end\{minipage\}", r"\\begin\{minipage\*\}(.*?)\\end\{minipage\*\}"], re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, [r"\\begin\{align\*\}(.*?)\\end\{align\*\}", r"\\begin\{align\}(.*?)\\end\{align\}"], re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, [r"\\begin\{equation\}(.*?)\\end\{equation\}", r"\\begin\{equation\*\}(.*?)\\end\{equation\*\}"], re.DOTALL)
|
||||||
|
text, mask = set_forbidden_text(text, mask, [r"\\includepdf\[(.*?)\]\{(.*?)\}", r"\\clearpage", r"\\newpage", r"\\appendix", r"\\tableofcontents", r"\\include\{(.*?)\}"])
|
||||||
|
text, mask = set_forbidden_text(text, mask, [r"\\vspace\{(.*?)\}", r"\\hspace\{(.*?)\}", r"\\label\{(.*?)\}", r"\\begin\{(.*?)\}", r"\\end\{(.*?)\}", r"\\item "])
|
||||||
|
text, mask = set_forbidden_text_careful_brace(text, mask, r"\\hl\{(.*?)\}", re.DOTALL)
|
||||||
|
# reverse 操作必须放在最后
|
||||||
|
text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\caption\{(.*?)\}", re.DOTALL, forbid_wrapper=True)
|
||||||
|
text, mask = reverse_forbidden_text_careful_brace(text, mask, r"\\abstract\{(.*?)\}", re.DOTALL, forbid_wrapper=True)
|
||||||
|
text, mask = reverse_forbidden_text(text, mask, r"\\begin\{abstract\}(.*?)\\end\{abstract\}", re.DOTALL, forbid_wrapper=True)
|
||||||
|
root = convert_to_linklist(text, mask)
|
||||||
|
|
||||||
|
# 修复括号
|
||||||
|
node = root
|
||||||
|
while True:
|
||||||
|
string = node.string
|
||||||
|
if node.preserve:
|
||||||
|
node = node.next
|
||||||
|
if node is None: break
|
||||||
|
continue
|
||||||
|
def break_check(string):
|
||||||
|
str_stack = [""] # (lv, index)
|
||||||
|
for i, c in enumerate(string):
|
||||||
|
if c == '{':
|
||||||
|
str_stack.append('{')
|
||||||
|
elif c == '}':
|
||||||
|
if len(str_stack) == 1:
|
||||||
|
print('stack fix')
|
||||||
|
return i
|
||||||
|
str_stack.pop(-1)
|
||||||
|
else:
|
||||||
|
str_stack[-1] += c
|
||||||
|
return -1
|
||||||
|
bp = break_check(string)
|
||||||
|
|
||||||
|
if bp == -1:
|
||||||
|
pass
|
||||||
|
elif bp == 0:
|
||||||
|
node.string = string[:1]
|
||||||
|
q = LinkedListNode(string[1:], False)
|
||||||
|
q.next = node.next
|
||||||
|
node.next = q
|
||||||
|
else:
|
||||||
|
node.string = string[:bp]
|
||||||
|
q = LinkedListNode(string[bp:], False)
|
||||||
|
q.next = node.next
|
||||||
|
node.next = q
|
||||||
|
|
||||||
|
node = node.next
|
||||||
|
if node is None: break
|
||||||
|
|
||||||
|
# 屏蔽空行和太短的句子
|
||||||
|
node = root
|
||||||
|
while True:
|
||||||
|
if len(node.string.strip('\n').strip(''))==0: node.preserve = True
|
||||||
|
if len(node.string.strip('\n').strip(''))<42: node.preserve = True
|
||||||
|
node = node.next
|
||||||
|
if node is None: break
|
||||||
|
node = root
|
||||||
|
while True:
|
||||||
|
if node.next and node.preserve and node.next.preserve:
|
||||||
|
node.string += node.next.string
|
||||||
|
node.next = node.next.next
|
||||||
|
node = node.next
|
||||||
|
if node is None: break
|
||||||
|
|
||||||
|
# 将前后断行符脱离
|
||||||
|
node = root
|
||||||
|
prev_node = None
|
||||||
|
while True:
|
||||||
|
if not node.preserve:
|
||||||
|
lstriped_ = node.string.lstrip().lstrip('\n')
|
||||||
|
if (prev_node is not None) and (prev_node.preserve) and (len(lstriped_)!=len(node.string)):
|
||||||
|
prev_node.string += node.string[:-len(lstriped_)]
|
||||||
|
node.string = lstriped_
|
||||||
|
rstriped_ = node.string.rstrip().rstrip('\n')
|
||||||
|
if (node.next is not None) and (node.next.preserve) and (len(rstriped_)!=len(node.string)):
|
||||||
|
node.next.string = node.string[len(rstriped_):] + node.next.string
|
||||||
|
node.string = rstriped_
|
||||||
|
# =====
|
||||||
|
prev_node = node
|
||||||
|
node = node.next
|
||||||
|
if node is None: break
|
||||||
|
# 输出html调试文件,用红色标注处保留区(PRESERVE),用黑色标注转换区(TRANSFORM)
|
||||||
|
with open(pj(project_folder, 'debug_log.html'), 'w', encoding='utf8') as f:
|
||||||
|
segment_parts_for_gpt = []
|
||||||
|
nodes = []
|
||||||
|
node = root
|
||||||
|
while True:
|
||||||
|
nodes.append(node)
|
||||||
|
show_html = node.string.replace('\n','<br/>')
|
||||||
|
if not node.preserve:
|
||||||
|
segment_parts_for_gpt.append(node.string)
|
||||||
|
f.write(f'<p style="color:black;">#{show_html}#</p>')
|
||||||
|
else:
|
||||||
|
f.write(f'<p style="color:red;">{show_html}</p>')
|
||||||
|
node = node.next
|
||||||
|
if node is None: break
|
||||||
|
|
||||||
|
for n in nodes: n.next = None # break
|
||||||
|
return_dict['nodes'] = nodes
|
||||||
|
return_dict['segment_parts_for_gpt'] = segment_parts_for_gpt
|
||||||
|
return return_dict
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class LatexPaperSplit():
|
||||||
|
"""
|
||||||
|
break down latex file to a linked list,
|
||||||
|
each node use a preserve flag to indicate whether it should
|
||||||
|
be proccessed by GPT.
|
||||||
|
"""
|
||||||
|
def __init__(self) -> None:
|
||||||
|
self.nodes = None
|
||||||
|
self.msg = "*{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \
|
||||||
|
"版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \
|
||||||
|
"项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
|
||||||
|
# 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者)
|
||||||
|
self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
|
||||||
|
|
||||||
|
def merge_result(self, arr, mode, msg):
|
||||||
|
"""
|
||||||
|
Merge the result after the GPT process completed
|
||||||
|
"""
|
||||||
|
result_string = ""
|
||||||
|
p = 0
|
||||||
|
for node in self.nodes:
|
||||||
|
if node.preserve:
|
||||||
|
result_string += node.string
|
||||||
|
else:
|
||||||
|
result_string += fix_content(arr[p], node.string)
|
||||||
|
p += 1
|
||||||
|
if mode == 'translate_zh':
|
||||||
|
pattern = re.compile(r'\\begin\{abstract\}.*\n')
|
||||||
|
match = pattern.search(result_string)
|
||||||
|
if not match:
|
||||||
|
# match \abstract{xxxx}
|
||||||
|
pattern_compile = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
|
||||||
|
match = pattern_compile.search(result_string)
|
||||||
|
position = match.regs[1][0]
|
||||||
|
else:
|
||||||
|
# match \begin{abstract}xxxx\end{abstract}
|
||||||
|
position = match.end()
|
||||||
|
result_string = result_string[:position] + self.msg + msg + self.msg_declare + result_string[position:]
|
||||||
|
return result_string
|
||||||
|
|
||||||
|
def split(self, txt, project_folder, opts):
|
||||||
|
"""
|
||||||
|
break down latex file to a linked list,
|
||||||
|
each node use a preserve flag to indicate whether it should
|
||||||
|
be proccessed by GPT.
|
||||||
|
P.S. use multiprocessing to avoid timeout error
|
||||||
|
"""
|
||||||
|
import multiprocessing
|
||||||
|
manager = multiprocessing.Manager()
|
||||||
|
return_dict = manager.dict()
|
||||||
|
p = multiprocessing.Process(
|
||||||
|
target=split_subprocess,
|
||||||
|
args=(txt, project_folder, return_dict, opts))
|
||||||
|
p.start()
|
||||||
|
p.join()
|
||||||
|
p.close()
|
||||||
|
self.nodes = return_dict['nodes']
|
||||||
|
self.sp = return_dict['segment_parts_for_gpt']
|
||||||
|
return self.sp
|
||||||
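LatexPaperSplit.split runs the regex-heavy segmentation in a child process and collects the result through a Manager dict, which keeps the main Gradio process responsive and lets the OS reclaim the child's memory afterwards. The pattern in isolation, with a trivial placeholder worker:

    import multiprocessing

    def worker(text, return_dict):
        # stand-in for split_subprocess: any CPU-heavy pure function fits here
        return_dict['segments'] = text.split('.')

    if __name__ == '__main__':
        manager = multiprocessing.Manager()
        return_dict = manager.dict()
        p = multiprocessing.Process(target=worker, args=("a. b. c", return_dict))
        p.start(); p.join(); p.close()
        print(return_dict['segments'])   # ['a', ' b', ' c']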
|
|
||||||
|
|
||||||
|
|
||||||
|
class LatexPaperFileGroup():
|
||||||
|
"""
|
||||||
|
use tokenizer to break down text according to max_token_limit
|
||||||
|
"""
|
||||||
|
def __init__(self):
|
||||||
|
self.file_paths = []
|
||||||
|
self.file_contents = []
|
||||||
|
self.sp_file_contents = []
|
||||||
|
self.sp_file_index = []
|
||||||
|
self.sp_file_tag = []
|
||||||
|
|
||||||
|
# count_token
|
||||||
|
from request_llm.bridge_all import model_info
|
||||||
|
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
||||||
|
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
||||||
|
self.get_token_num = get_token_num
|
||||||
|
|
||||||
|
def run_file_split(self, max_token_limit=1900):
|
||||||
|
"""
|
||||||
|
use tokenizer to break down text according to max_token_limit
|
||||||
|
"""
|
||||||
|
for index, file_content in enumerate(self.file_contents):
|
||||||
|
if self.get_token_num(file_content) < max_token_limit:
|
||||||
|
self.sp_file_contents.append(file_content)
|
||||||
|
self.sp_file_index.append(index)
|
||||||
|
self.sp_file_tag.append(self.file_paths[index])
|
||||||
|
else:
|
||||||
|
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||||
|
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
|
||||||
|
for j, segment in enumerate(segments):
|
||||||
|
self.sp_file_contents.append(segment)
|
||||||
|
self.sp_file_index.append(index)
|
||||||
|
self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
|
||||||
|
print('Segmentation: done')
|
||||||
|
|
||||||
|
def merge_result(self):
|
||||||
|
self.file_result = ["" for _ in range(len(self.file_paths))]
|
||||||
|
for r, k in zip(self.sp_file_result, self.sp_file_index):
|
||||||
|
self.file_result[k] += r
|
||||||
|
|
||||||
|
def write_result(self):
|
||||||
|
manifest = []
|
||||||
|
for path, res in zip(self.file_paths, self.file_result):
|
||||||
|
with open(path + '.polish.tex', 'w', encoding='utf8') as f:
|
||||||
|
manifest.append(path + '.polish.tex')
|
||||||
|
f.write(res)
|
||||||
|
return manifest
|
||||||
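LatexPaperFileGroup.run_file_split above keeps every fragment under a model token budget and only calls the heavier breakdown helper when a segment is too long. The control flow on its own, with a whitespace word count standing in for the real tiktoken tokenizer (an assumption made purely for this demo):

    def split_by_budget(pieces, count_tokens, limit):
        out = []
        for text in pieces:
            if count_tokens(text) < limit:
                out.append(text)                     # short enough: keep whole
            else:
                words = text.split()                 # too long: chop into <= limit chunks
                for i in range(0, len(words), limit):
                    out.append(" ".join(words[i:i + limit]))
        return out

    count = lambda s: len(s.split())
    print(split_by_budget(["short piece", "one two three four five six"], count, 4))
    # ['short piece', 'one two three four', 'five six']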
|
|
||||||
|
def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
|
||||||
|
|
||||||
|
# write html
|
||||||
|
try:
|
||||||
|
import shutil
|
||||||
|
from .crazy_utils import construct_html
|
||||||
|
from toolbox import gen_time_str
|
||||||
|
ch = construct_html()
|
||||||
|
orig = ""
|
||||||
|
trans = ""
|
||||||
|
final = []
|
||||||
|
for c,r in zip(sp_file_contents, sp_file_result):
|
||||||
|
final.append(c)
|
||||||
|
final.append(r)
|
||||||
|
for i, k in enumerate(final):
|
||||||
|
if i%2==0:
|
||||||
|
orig = k
|
||||||
|
if i%2==1:
|
||||||
|
trans = k
|
||||||
|
ch.add_row(a=orig, b=trans)
|
||||||
|
create_report_file_name = f"{gen_time_str()}.trans.html"
|
||||||
|
ch.save_file(create_report_file_name)
|
||||||
|
shutil.copyfile(pj('./gpt_log/', create_report_file_name), pj(project_folder, create_report_file_name))
|
||||||
|
promote_file_to_downloadzone(file=f'./gpt_log/{create_report_file_name}', chatbot=chatbot)
|
||||||
|
except:
|
||||||
|
from toolbox import trimmed_format_exc
|
||||||
|
print('writing html result failed:', trimmed_format_exc())
|
||||||
|
|
||||||
|
def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, mode='proofread', switch_prompt=None, opts=[]):
|
||||||
|
import time, os, re
|
||||||
|
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
||||||
|
from .latex_utils import LatexPaperFileGroup, merge_tex_files, LatexPaperSplit, 寻找Latex主文件
|
||||||
|
|
||||||
|
# <-------- 寻找主tex文件 ---------->
|
||||||
|
maintex = 寻找Latex主文件(file_manifest, mode)
|
||||||
|
chatbot.append((f"定位主Latex文件", f'[Local Message] 分析结果:该项目的Latex主文件是{maintex}, 如果分析错误, 请立即终止程序, 删除或修改歧义文件, 然后重试。主程序即将开始, 请稍候。'))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
time.sleep(3)
|
||||||
|
|
||||||
|
# <-------- 读取Latex文件, 将多文件tex工程融合为一个巨型tex ---------->
|
||||||
|
main_tex_basename = os.path.basename(maintex)
|
||||||
|
assert main_tex_basename.endswith('.tex')
|
||||||
|
main_tex_basename_bare = main_tex_basename[:-4]
|
||||||
|
may_exist_bbl = pj(project_folder, f'{main_tex_basename_bare}.bbl')
|
||||||
|
if os.path.exists(may_exist_bbl):
|
||||||
|
shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge.bbl'))
|
||||||
|
shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_{mode}.bbl'))
|
||||||
|
shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_diff.bbl'))
|
||||||
|
|
||||||
|
with open(maintex, 'r', encoding='utf-8', errors='replace') as f:
|
||||||
|
content = f.read()
|
||||||
|
merged_content = merge_tex_files(project_folder, content, mode)
|
||||||
|
|
||||||
|
with open(project_folder + '/merge.tex', 'w', encoding='utf-8', errors='replace') as f:
|
||||||
|
f.write(merged_content)
|
||||||
|
|
||||||
|
# <-------- 精细切分latex文件 ---------->
|
||||||
|
chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。'))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
lps = LatexPaperSplit()
|
||||||
|
res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数
|
||||||
|
|
||||||
|
# <-------- 拆分过长的latex片段 ---------->
|
||||||
|
pfg = LatexPaperFileGroup()
|
||||||
|
for index, r in enumerate(res):
|
||||||
|
pfg.file_paths.append('segment-' + str(index))
|
||||||
|
pfg.file_contents.append(r)
|
||||||
|
|
||||||
|
pfg.run_file_split(max_token_limit=1024)
|
||||||
|
n_split = len(pfg.sp_file_contents)
|
||||||
|
|
||||||
|
# <-------- 根据需要切换prompt ---------->
|
||||||
|
inputs_array, sys_prompt_array = switch_prompt(pfg, mode)
|
||||||
|
inputs_show_user_array = [f"{mode} {f}" for f in pfg.sp_file_tag]
|
||||||
|
|
||||||
|
if os.path.exists(pj(project_folder,'temp.pkl')):
|
||||||
|
|
||||||
|
# <-------- 【仅调试】如果存在调试缓存文件,则跳过GPT请求环节 ---------->
|
||||||
|
pfg = objload(file=pj(project_folder,'temp.pkl'))
|
||||||
|
|
||||||
|
else:
|
||||||
|
# <-------- gpt 多线程请求 ---------->
|
||||||
|
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
||||||
|
inputs_array=inputs_array,
|
||||||
|
inputs_show_user_array=inputs_show_user_array,
|
||||||
|
llm_kwargs=llm_kwargs,
|
||||||
|
chatbot=chatbot,
|
||||||
|
history_array=[[""] for _ in range(n_split)],
|
||||||
|
sys_prompt_array=sys_prompt_array,
|
||||||
|
# max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待
|
||||||
|
scroller_max_len = 40
|
||||||
|
)
|
||||||
|
|
||||||
|
# <-------- 文本碎片重组为完整的tex片段 ---------->
|
||||||
|
pfg.sp_file_result = []
|
||||||
|
for i_say, gpt_say, orig_content in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], pfg.sp_file_contents):
|
||||||
|
pfg.sp_file_result.append(gpt_say)
|
||||||
|
pfg.merge_result()
|
||||||
|
|
||||||
|
# <-------- 临时存储用于调试 ---------->
|
||||||
|
pfg.get_token_num = None
|
||||||
|
objdump(pfg, file=pj(project_folder,'temp.pkl'))
|
||||||
|
|
||||||
|
write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder)
|
||||||
|
|
||||||
|
# <-------- 写出文件 ---------->
|
||||||
|
msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。"
|
||||||
|
final_tex = lps.merge_result(pfg.file_result, mode, msg)
|
||||||
|
with open(project_folder + f'/merge_{mode}.tex', 'w', encoding='utf-8', errors='replace') as f:
|
||||||
|
if mode != 'translate_zh' or "binary" in final_tex: f.write(final_tex)
|
||||||
|
|
||||||
|
|
||||||
|
# <-------- 整理结果, 退出 ---------->
|
||||||
|
chatbot.append((f"完成了吗?", 'GPT结果已输出, 正在编译PDF'))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
|
||||||
|
# <-------- 返回 ---------->
|
||||||
|
return project_folder + f'/merge_{mode}.tex'
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work_folder_modified):
|
||||||
|
try:
|
||||||
|
with open(log_path, 'r', encoding='utf-8', errors='replace') as f:
|
||||||
|
log = f.read()
|
||||||
|
with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
|
||||||
|
file_lines = f.readlines()
|
||||||
|
import re
|
||||||
|
buggy_lines = re.findall(tex_name+':([0-9]{1,5}):', log)
|
||||||
|
buggy_lines = [int(l) for l in buggy_lines]
|
||||||
|
buggy_lines = sorted(buggy_lines)
|
||||||
|
print("removing lines that has errors", buggy_lines)
|
||||||
|
file_lines.pop(buggy_lines[0]-1)
|
||||||
|
with open(pj(work_folder_modified, f"{tex_name_pure}_fix_{n_fix}.tex"), 'w', encoding='utf-8', errors='replace') as f:
|
||||||
|
f.writelines(file_lines)
|
||||||
|
return True, f"{tex_name_pure}_fix_{n_fix}", buggy_lines
|
||||||
|
except:
|
||||||
|
print("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.")
|
||||||
|
return False, -1, [-1]
|
||||||
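remove_buggy_lines above works because pdflatex is invoked with -file-line-error further down, so compile errors show up in the log as '<tex file>:<line>: message' and the offending line numbers can be harvested with a single regex. The harvesting step by itself (the log text is fabricated for the demo):

    import re

    log = ("./merge_translate_zh.tex:88: Undefined control sequence.\n"
           "./merge_translate_zh.tex:231: Missing $ inserted.")
    tex_name = "merge_translate_zh.tex"

    buggy_lines = sorted(int(n) for n in re.findall(re.escape(tex_name) + r':([0-9]{1,5}):', log))
    print(buggy_lines)   # [88, 231]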
|
|
||||||
|
def compile_latex_with_timeout(command, cwd, timeout=60):
|
||||||
|
import subprocess
|
||||||
|
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
|
||||||
|
try:
|
||||||
|
stdout, stderr = process.communicate(timeout=timeout)
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
process.kill()
|
||||||
|
stdout, stderr = process.communicate()
|
||||||
|
print("Process timed out!")
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_folder_original, work_folder_modified, work_folder, mode='default'):
|
||||||
|
import os, time
|
||||||
|
current_dir = os.getcwd()
|
||||||
|
n_fix = 1
|
||||||
|
max_try = 32
|
||||||
|
chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
chatbot.append([f"正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1]) # 刷新界面
|
||||||
|
yield from update_ui_lastest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
|
||||||
|
while True:
|
||||||
|
import os
|
||||||
|
|
||||||
|
# https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error
|
||||||
|
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
|
||||||
|
|
||||||
|
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
|
||||||
|
|
||||||
|
if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')):
|
||||||
|
# 只有第二步成功,才能继续下面的步骤
|
||||||
|
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')):
|
||||||
|
ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original)
|
||||||
|
if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')):
|
||||||
|
ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified)
|
||||||
|
|
||||||
|
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
|
||||||
|
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
|
||||||
|
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original)
|
||||||
|
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified)
|
||||||
|
|
||||||
|
if mode!='translate_zh':
|
||||||
|
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
print( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
|
||||||
|
ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
|
||||||
|
|
||||||
|
yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
|
||||||
|
ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder)
|
||||||
|
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
|
||||||
|
ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
|
||||||
|
|
||||||
|
|
||||||
|
# <---------- 检查结果 ----------->
|
||||||
|
results_ = ""
|
||||||
|
original_pdf_success = os.path.exists(pj(work_folder_original, f'{main_file_original}.pdf'))
|
||||||
|
modified_pdf_success = os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf'))
|
||||||
|
diff_pdf_success = os.path.exists(pj(work_folder, f'merge_diff.pdf'))
|
||||||
|
results_ += f"原始PDF编译是否成功: {original_pdf_success};"
|
||||||
|
results_ += f"转化PDF编译是否成功: {modified_pdf_success};"
|
||||||
|
results_ += f"对比PDF编译是否成功: {diff_pdf_success};"
|
||||||
|
yield from update_ui_lastest_msg(f'第{n_fix}编译结束:<br/>{results_}...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
|
||||||
|
if diff_pdf_success:
|
||||||
|
result_pdf = pj(work_folder_modified, f'merge_diff.pdf') # get pdf path
|
||||||
|
promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
|
||||||
|
if modified_pdf_success:
|
||||||
|
yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 即将退出 ...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path
|
||||||
|
if os.path.exists(pj(work_folder, '..', 'translation')):
|
||||||
|
shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf'))
|
||||||
|
promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
|
||||||
|
return True # 成功啦
|
||||||
|
else:
|
||||||
|
if n_fix>=max_try: break
|
||||||
|
n_fix += 1
|
||||||
|
can_retry, main_file_modified, buggy_lines = remove_buggy_lines(
|
||||||
|
file_path=pj(work_folder_modified, f'{main_file_modified}.tex'),
|
||||||
|
log_path=pj(work_folder_modified, f'{main_file_modified}.log'),
|
||||||
|
tex_name=f'{main_file_modified}.tex',
|
||||||
|
tex_name_pure=f'{main_file_modified}',
|
||||||
|
n_fix=n_fix,
|
||||||
|
work_folder_modified=work_folder_modified,
|
||||||
|
)
|
||||||
|
yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面
|
||||||
|
if not can_retry: break
|
||||||
|
|
||||||
|
return False # 失败啦
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,73 +0,0 @@
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">

<head>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
    <title>GPT-Academic 翻译报告书</title>
    <style>
        .centered-a {
            color: red;
            text-align: center;
            margin-bottom: 2%;
            font-size: 1.5em;
        }
        .centered-b {
            color: red;
            text-align: center;
            margin-top: 10%;
            margin-bottom: 20%;
            font-size: 1.5em;
        }
        .centered-c {
            color: rgba(255, 0, 0, 0);
            text-align: center;
            margin-top: 2%;
            margin-bottom: 20%;
            font-size: 7em;
        }
    </style>
    <script>
        // Configure MathJax settings
        MathJax = {
            tex: {
                inlineMath: [
                    ['$', '$'],
                    ['\(', '\)']
                ]
            }
        }
        addEventListener('zero-md-rendered', () => {MathJax.typeset(); console.log('MathJax typeset!');})
    </script>
    <!-- Load MathJax library -->
    <script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"></script>
    <script
      type="module"
      src="https://cdn.jsdelivr.net/gh/zerodevx/zero-md@2/dist/zero-md.min.js"
    ></script>

</head>

<body>
    <div class="test_temp1" style="width:10%; height: 500px; float:left;">

    </div>
    <div class="test_temp2" style="width:80%; height: 500px; float:left;">
        <!-- Simply set the `src` attribute to your MD file and win -->
        <div class="centered-a">
            请按Ctrl+S保存此页面,否则该页面可能在几分钟后失效。
        </div>
        <zero-md src="translated_markdown.md" no-shadow>
        </zero-md>
        <div class="centered-b">
            本报告由GPT-Academic开源项目生成,地址:https://github.com/binary-husky/gpt_academic。
        </div>
        <div class="centered-c">
            本报告由GPT-Academic开源项目生成,地址:https://github.com/binary-husky/gpt_academic。
        </div>
    </div>
    <div class="test_temp3" style="width:10%; height: 500px; float:left;">
    </div>

</body>

</html>
@@ -0,0 +1,87 @@
#include "libipc/buffer.h"
#include "libipc/utility/pimpl.h"

#include <cstring>

namespace ipc {

bool operator==(buffer const & b1, buffer const & b2) {
    return (b1.size() == b2.size()) && (std::memcmp(b1.data(), b2.data(), b1.size()) == 0);
}

bool operator!=(buffer const & b1, buffer const & b2) {
    return !(b1 == b2);
}

class buffer::buffer_ : public pimpl<buffer_> {
public:
    void* p_;
    std::size_t s_;
    void* a_;
    buffer::destructor_t d_;

    buffer_(void* p, std::size_t s, buffer::destructor_t d, void* a)
        : p_(p), s_(s), a_(a), d_(d) {
    }

    ~buffer_() {
        if (d_ == nullptr) return;
        d_((a_ == nullptr) ? p_ : a_, s_);
    }
};

buffer::buffer()
    : buffer(nullptr, 0, nullptr, nullptr) {
}

buffer::buffer(void* p, std::size_t s, destructor_t d)
    : p_(p_->make(p, s, d, nullptr)) {
}

buffer::buffer(void* p, std::size_t s, destructor_t d, void* additional)
    : p_(p_->make(p, s, d, additional)) {
}

buffer::buffer(void* p, std::size_t s)
    : buffer(p, s, nullptr) {
}

buffer::buffer(char const & c)
    : buffer(const_cast<char*>(&c), 1) {
}

buffer::buffer(buffer&& rhs)
    : buffer() {
    swap(rhs);
}

buffer::~buffer() {
    p_->clear();
}

void buffer::swap(buffer& rhs) {
    std::swap(p_, rhs.p_);
}

buffer& buffer::operator=(buffer rhs) {
    swap(rhs);
    return *this;
}

bool buffer::empty() const noexcept {
    return (impl(p_)->p_ == nullptr) || (impl(p_)->s_ == 0);
}

void* buffer::data() noexcept {
    return impl(p_)->p_;
}

void const * buffer::data() const noexcept {
    return impl(p_)->p_;
}

std::size_t buffer::size() const noexcept {
    return impl(p_)->s_;
}

} // namespace ipc
@@ -0,0 +1,701 @@
|
|||||||
|
|
||||||
|
#include <type_traits>
|
||||||
|
#include <cstring>
|
||||||
|
#include <algorithm>
|
||||||
|
#include <utility> // std::pair, std::move, std::forward
|
||||||
|
#include <atomic>
|
||||||
|
#include <type_traits> // aligned_storage_t
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <array>
|
||||||
|
#include <cassert>
|
||||||
|
|
||||||
|
#include "libipc/ipc.h"
|
||||||
|
#include "libipc/def.h"
|
||||||
|
#include "libipc/shm.h"
|
||||||
|
#include "libipc/pool_alloc.h"
|
||||||
|
#include "libipc/queue.h"
|
||||||
|
#include "libipc/policy.h"
|
||||||
|
#include "libipc/rw_lock.h"
|
||||||
|
#include "libipc/waiter.h"
|
||||||
|
|
||||||
|
#include "libipc/utility/log.h"
|
||||||
|
#include "libipc/utility/id_pool.h"
|
||||||
|
#include "libipc/utility/scope_guard.h"
|
||||||
|
#include "libipc/utility/utility.h"
|
||||||
|
|
||||||
|
#include "libipc/memory/resource.h"
|
||||||
|
#include "libipc/platform/detail.h"
|
||||||
|
#include "libipc/circ/elem_array.h"
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
using msg_id_t = std::uint32_t;
|
||||||
|
using acc_t = std::atomic<msg_id_t>;
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct msg_t;
|
||||||
|
|
||||||
|
template <std::size_t AlignSize>
|
||||||
|
struct msg_t<0, AlignSize> {
|
||||||
|
msg_id_t cc_id_;
|
||||||
|
msg_id_t id_;
|
||||||
|
std::int32_t remain_;
|
||||||
|
bool storage_;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct msg_t : msg_t<0, AlignSize> {
|
||||||
|
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||||
|
|
||||||
|
msg_t() = default;
|
||||||
|
msg_t(msg_id_t cc_id, msg_id_t id, std::int32_t remain, void const * data, std::size_t size)
|
||||||
|
: msg_t<0, AlignSize> {cc_id, id, remain, (data == nullptr) || (size == 0)} {
|
||||||
|
if (this->storage_) {
|
||||||
|
if (data != nullptr) {
|
||||||
|
// copy storage-id
|
||||||
|
*reinterpret_cast<ipc::storage_id_t*>(&data_) =
|
||||||
|
*static_cast<ipc::storage_id_t const *>(data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else std::memcpy(&data_, data, size);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
ipc::buff_t make_cache(T& data, std::size_t size) {
|
||||||
|
auto ptr = ipc::mem::alloc(size);
|
||||||
|
std::memcpy(ptr, &data, (ipc::detail::min)(sizeof(data), size));
|
||||||
|
return { ptr, size, ipc::mem::free };
|
||||||
|
}
|
||||||
|
|
||||||
|
struct cache_t {
|
||||||
|
std::size_t fill_;
|
||||||
|
ipc::buff_t buff_;
|
||||||
|
|
||||||
|
cache_t(std::size_t f, ipc::buff_t && b)
|
||||||
|
: fill_(f), buff_(std::move(b))
|
||||||
|
{}
|
||||||
|
|
||||||
|
void append(void const * data, std::size_t size) {
|
||||||
|
if (fill_ >= buff_.size() || data == nullptr || size == 0) return;
|
||||||
|
auto new_fill = (ipc::detail::min)(fill_ + size, buff_.size());
|
||||||
|
std::memcpy(static_cast<ipc::byte_t*>(buff_.data()) + fill_, data, new_fill - fill_);
|
||||||
|
fill_ = new_fill;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
auto cc_acc() {
|
||||||
|
static ipc::shm::handle acc_h("__CA_CONN__", sizeof(acc_t));
|
||||||
|
return static_cast<acc_t*>(acc_h.get());
|
||||||
|
}
|
||||||
|
|
||||||
|
IPC_CONSTEXPR_ std::size_t align_chunk_size(std::size_t size) noexcept {
|
||||||
|
return (((size - 1) / ipc::large_msg_align) + 1) * ipc::large_msg_align;
|
||||||
|
}
|
||||||
|
|
||||||
|
IPC_CONSTEXPR_ std::size_t calc_chunk_size(std::size_t size) noexcept {
|
||||||
|
return ipc::make_align(alignof(std::max_align_t), align_chunk_size(
|
||||||
|
ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic<ipc::circ::cc_t>)) + size));
|
||||||
|
}
|
||||||
|
|
||||||
|
struct chunk_t {
|
||||||
|
std::atomic<ipc::circ::cc_t> &conns() noexcept {
|
||||||
|
return *reinterpret_cast<std::atomic<ipc::circ::cc_t> *>(this);
|
||||||
|
}
|
||||||
|
|
||||||
|
void *data() noexcept {
|
||||||
|
return reinterpret_cast<ipc::byte_t *>(this)
|
||||||
|
+ ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic<ipc::circ::cc_t>));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
struct chunk_info_t {
|
||||||
|
ipc::id_pool<> pool_;
|
||||||
|
ipc::spin_lock lock_;
|
||||||
|
|
||||||
|
IPC_CONSTEXPR_ static std::size_t chunks_mem_size(std::size_t chunk_size) noexcept {
|
||||||
|
return ipc::id_pool<>::max_count * chunk_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
ipc::byte_t *chunks_mem() noexcept {
|
||||||
|
return reinterpret_cast<ipc::byte_t *>(this + 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
chunk_t *at(std::size_t chunk_size, ipc::storage_id_t id) noexcept {
|
||||||
|
if (id < 0) return nullptr;
|
||||||
|
return reinterpret_cast<chunk_t *>(chunks_mem() + (chunk_size * id));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
auto& chunk_storages() {
|
||||||
|
class chunk_handle_t {
|
||||||
|
ipc::shm::handle handle_;
|
||||||
|
|
||||||
|
public:
|
||||||
|
chunk_info_t *get_info(std::size_t chunk_size) {
|
||||||
|
if (!handle_.valid() &&
|
||||||
|
!handle_.acquire( ("__CHUNK_INFO__" + ipc::to_string(chunk_size)).c_str(),
|
||||||
|
sizeof(chunk_info_t) + chunk_info_t::chunks_mem_size(chunk_size) )) {
|
||||||
|
ipc::error("[chunk_storages] chunk_shm.id_info_.acquire failed: chunk_size = %zd\n", chunk_size);
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
auto info = static_cast<chunk_info_t*>(handle_.get());
|
||||||
|
if (info == nullptr) {
|
||||||
|
ipc::error("[chunk_storages] chunk_shm.id_info_.get failed: chunk_size = %zd\n", chunk_size);
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
return info;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
static ipc::map<std::size_t, chunk_handle_t> chunk_hs;
|
||||||
|
return chunk_hs;
|
||||||
|
}
|
||||||
|
|
||||||
|
chunk_info_t *chunk_storage_info(std::size_t chunk_size) {
|
||||||
|
auto &storages = chunk_storages();
|
||||||
|
std::decay_t<decltype(storages)>::iterator it;
|
||||||
|
{
|
||||||
|
static ipc::rw_lock lock;
|
||||||
|
IPC_UNUSED_ std::shared_lock<ipc::rw_lock> guard {lock};
|
||||||
|
if ((it = storages.find(chunk_size)) == storages.end()) {
|
||||||
|
using chunk_handle_t = std::decay_t<decltype(storages)>::value_type::second_type;
|
||||||
|
guard.unlock();
|
||||||
|
IPC_UNUSED_ std::lock_guard<ipc::rw_lock> guard {lock};
|
||||||
|
it = storages.emplace(chunk_size, chunk_handle_t{}).first;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return it->second.get_info(chunk_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::pair<ipc::storage_id_t, void*> acquire_storage(std::size_t size, ipc::circ::cc_t conns) {
|
||||||
|
std::size_t chunk_size = calc_chunk_size(size);
|
||||||
|
auto info = chunk_storage_info(chunk_size);
|
||||||
|
if (info == nullptr) return {};
|
||||||
|
|
||||||
|
info->lock_.lock();
|
||||||
|
info->pool_.prepare();
|
||||||
|
// got an unique id
|
||||||
|
auto id = info->pool_.acquire();
|
||||||
|
info->lock_.unlock();
|
||||||
|
|
||||||
|
auto chunk = info->at(chunk_size, id);
|
||||||
|
if (chunk == nullptr) return {};
|
||||||
|
chunk->conns().store(conns, std::memory_order_relaxed);
|
||||||
|
return { id, chunk->data() };
|
||||||
|
}
|
||||||
|
|
||||||
|
void *find_storage(ipc::storage_id_t id, std::size_t size) {
|
||||||
|
if (id < 0) {
|
||||||
|
ipc::error("[find_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
std::size_t chunk_size = calc_chunk_size(size);
|
||||||
|
auto info = chunk_storage_info(chunk_size);
|
||||||
|
if (info == nullptr) return nullptr;
|
||||||
|
return info->at(chunk_size, id)->data();
|
||||||
|
}
|
||||||
|
|
||||||
|
void release_storage(ipc::storage_id_t id, std::size_t size) {
|
||||||
|
if (id < 0) {
|
||||||
|
ipc::error("[release_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
std::size_t chunk_size = calc_chunk_size(size);
|
||||||
|
auto info = chunk_storage_info(chunk_size);
|
||||||
|
if (info == nullptr) return;
|
||||||
|
info->lock_.lock();
|
||||||
|
info->pool_.release(id);
|
||||||
|
info->lock_.unlock();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <ipc::relat Rp, ipc::relat Rc>
|
||||||
|
bool sub_rc(ipc::wr<Rp, Rc, ipc::trans::unicast>,
|
||||||
|
std::atomic<ipc::circ::cc_t> &/*conns*/, ipc::circ::cc_t /*curr_conns*/, ipc::circ::cc_t /*conn_id*/) noexcept {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <ipc::relat Rp, ipc::relat Rc>
|
||||||
|
bool sub_rc(ipc::wr<Rp, Rc, ipc::trans::broadcast>,
|
||||||
|
std::atomic<ipc::circ::cc_t> &conns, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) noexcept {
|
||||||
|
auto last_conns = curr_conns & ~conn_id;
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
auto chunk_conns = conns.load(std::memory_order_acquire);
|
||||||
|
if (conns.compare_exchange_weak(chunk_conns, chunk_conns & last_conns, std::memory_order_release)) {
|
||||||
|
return (chunk_conns & last_conns) == 0;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
void recycle_storage(ipc::storage_id_t id, std::size_t size, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) {
|
||||||
|
if (id < 0) {
|
||||||
|
ipc::error("[recycle_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
std::size_t chunk_size = calc_chunk_size(size);
|
||||||
|
auto info = chunk_storage_info(chunk_size);
|
||||||
|
if (info == nullptr) return;
|
||||||
|
|
||||||
|
auto chunk = info->at(chunk_size, id);
|
||||||
|
if (chunk == nullptr) return;
|
||||||
|
|
||||||
|
if (!sub_rc(Flag{}, chunk->conns(), curr_conns, conn_id)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
info->lock_.lock();
|
||||||
|
info->pool_.release(id);
|
||||||
|
info->lock_.unlock();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename MsgT>
|
||||||
|
bool clear_message(void* p) {
|
||||||
|
auto msg = static_cast<MsgT*>(p);
|
||||||
|
if (msg->storage_) {
|
||||||
|
std::int32_t r_size = static_cast<std::int32_t>(ipc::data_length) + msg->remain_;
|
||||||
|
if (r_size <= 0) {
|
||||||
|
ipc::error("[clear_message] invalid msg size: %d\n", (int)r_size);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
release_storage(
|
||||||
|
*reinterpret_cast<ipc::storage_id_t*>(&msg->data_),
|
||||||
|
static_cast<std::size_t>(r_size));
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct conn_info_head {
|
||||||
|
|
||||||
|
ipc::string name_;
|
||||||
|
msg_id_t cc_id_; // connection-info id
|
||||||
|
ipc::detail::waiter cc_waiter_, wt_waiter_, rd_waiter_;
|
||||||
|
ipc::shm::handle acc_h_;
|
||||||
|
|
||||||
|
conn_info_head(char const * name)
|
||||||
|
: name_ {name}
|
||||||
|
, cc_id_ {(cc_acc() == nullptr) ? 0 : cc_acc()->fetch_add(1, std::memory_order_relaxed)}
|
||||||
|
, cc_waiter_{("__CC_CONN__" + name_).c_str()}
|
||||||
|
, wt_waiter_{("__WT_CONN__" + name_).c_str()}
|
||||||
|
, rd_waiter_{("__RD_CONN__" + name_).c_str()}
|
||||||
|
, acc_h_ {("__AC_CONN__" + name_).c_str(), sizeof(acc_t)} {
|
||||||
|
}
|
||||||
|
|
||||||
|
void quit_waiting() {
|
||||||
|
cc_waiter_.quit_waiting();
|
||||||
|
wt_waiter_.quit_waiting();
|
||||||
|
rd_waiter_.quit_waiting();
|
||||||
|
}
|
||||||
|
|
||||||
|
auto acc() {
|
||||||
|
return static_cast<acc_t*>(acc_h_.get());
|
||||||
|
}
|
||||||
|
|
||||||
|
auto& recv_cache() {
|
||||||
|
thread_local ipc::unordered_map<msg_id_t, cache_t> tls;
|
||||||
|
return tls;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename W, typename F>
|
||||||
|
bool wait_for(W& waiter, F&& pred, std::uint64_t tm) {
|
||||||
|
if (tm == 0) return !pred();
|
||||||
|
for (unsigned k = 0; pred();) {
|
||||||
|
bool ret = true;
|
||||||
|
ipc::sleep(k, [&k, &ret, &waiter, &pred, tm] {
|
||||||
|
ret = waiter.wait_if(std::forward<F>(pred), tm);
|
||||||
|
k = 0;
|
||||||
|
});
|
||||||
|
if (!ret) return false; // timeout or fail
|
||||||
|
if (k == 0) break; // k has been reset
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Policy,
|
||||||
|
std::size_t DataSize = ipc::data_length,
|
||||||
|
std::size_t AlignSize = (ipc::detail::min)(DataSize, alignof(std::max_align_t))>
|
||||||
|
struct queue_generator {
|
||||||
|
|
||||||
|
using queue_t = ipc::queue<msg_t<DataSize, AlignSize>, Policy>;
|
||||||
|
|
||||||
|
struct conn_info_t : conn_info_head {
|
||||||
|
queue_t que_;
|
||||||
|
|
||||||
|
conn_info_t(char const * name)
|
||||||
|
: conn_info_head{name}
|
||||||
|
, que_{("__QU_CONN__" +
|
||||||
|
ipc::to_string(DataSize) + "__" +
|
||||||
|
ipc::to_string(AlignSize) + "__" + name).c_str()} {
|
||||||
|
}
|
||||||
|
|
||||||
|
void disconnect_receiver() {
|
||||||
|
bool dis = que_.disconnect();
|
||||||
|
this->quit_waiting();
|
||||||
|
if (dis) {
|
||||||
|
this->recv_cache().clear();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename Policy>
|
||||||
|
struct detail_impl {
|
||||||
|
|
||||||
|
using policy_t = Policy;
|
||||||
|
using flag_t = typename policy_t::flag_t;
|
||||||
|
using queue_t = typename queue_generator<policy_t>::queue_t;
|
||||||
|
using conn_info_t = typename queue_generator<policy_t>::conn_info_t;
|
||||||
|
|
||||||
|
constexpr static conn_info_t* info_of(ipc::handle_t h) noexcept {
|
||||||
|
return static_cast<conn_info_t*>(h);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr static queue_t* queue_of(ipc::handle_t h) noexcept {
|
||||||
|
return (info_of(h) == nullptr) ? nullptr : &(info_of(h)->que_);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* API implementations */
|
||||||
|
|
||||||
|
static void disconnect(ipc::handle_t h) {
|
||||||
|
auto que = queue_of(h);
|
||||||
|
if (que == nullptr) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
que->shut_sending();
|
||||||
|
assert(info_of(h) != nullptr);
|
||||||
|
info_of(h)->disconnect_receiver();
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool reconnect(ipc::handle_t * ph, bool start_to_recv) {
|
||||||
|
assert(ph != nullptr);
|
||||||
|
assert(*ph != nullptr);
|
||||||
|
auto que = queue_of(*ph);
|
||||||
|
if (que == nullptr) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (start_to_recv) {
|
||||||
|
que->shut_sending();
|
||||||
|
if (que->connect()) { // wouldn't connect twice
|
||||||
|
info_of(*ph)->cc_waiter_.broadcast();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// start_to_recv == false
|
||||||
|
if (que->connected()) {
|
||||||
|
info_of(*ph)->disconnect_receiver();
|
||||||
|
}
|
||||||
|
return que->ready_sending();
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool connect(ipc::handle_t * ph, char const * name, bool start_to_recv) {
|
||||||
|
assert(ph != nullptr);
|
||||||
|
if (*ph == nullptr) {
|
||||||
|
*ph = ipc::mem::alloc<conn_info_t>(name);
|
||||||
|
}
|
||||||
|
return reconnect(ph, start_to_recv);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void destroy(ipc::handle_t h) {
|
||||||
|
disconnect(h);
|
||||||
|
ipc::mem::free(info_of(h));
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::size_t recv_count(ipc::handle_t h) noexcept {
|
||||||
|
auto que = queue_of(h);
|
||||||
|
if (que == nullptr) {
|
||||||
|
return ipc::invalid_value;
|
||||||
|
}
|
||||||
|
return que->conn_count();
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) {
|
||||||
|
auto que = queue_of(h);
|
||||||
|
if (que == nullptr) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return wait_for(info_of(h)->cc_waiter_, [que, r_count] {
|
||||||
|
return que->conn_count() < r_count;
|
||||||
|
}, tm);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename F>
|
||||||
|
static bool send(F&& gen_push, ipc::handle_t h, void const * data, std::size_t size) {
|
||||||
|
if (data == nullptr || size == 0) {
|
||||||
|
ipc::error("fail: send(%p, %zd)\n", data, size);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
auto que = queue_of(h);
|
||||||
|
if (que == nullptr) {
|
||||||
|
ipc::error("fail: send, queue_of(h) == nullptr\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (que->elems() == nullptr) {
|
||||||
|
ipc::error("fail: send, queue_of(h)->elems() == nullptr\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (!que->ready_sending()) {
|
||||||
|
ipc::error("fail: send, que->ready_sending() == false\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
ipc::circ::cc_t conns = que->elems()->connections(std::memory_order_relaxed);
|
||||||
|
if (conns == 0) {
|
||||||
|
ipc::error("fail: send, there is no receiver on this connection.\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// calc a new message id
|
||||||
|
auto acc = info_of(h)->acc();
|
||||||
|
if (acc == nullptr) {
|
||||||
|
ipc::error("fail: send, info_of(h)->acc() == nullptr\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
auto msg_id = acc->fetch_add(1, std::memory_order_relaxed);
|
||||||
|
auto try_push = std::forward<F>(gen_push)(info_of(h), que, msg_id);
|
||||||
|
if (size > ipc::large_msg_limit) {
|
||||||
|
auto dat = acquire_storage(size, conns);
|
||||||
|
void * buf = dat.second;
|
||||||
|
if (buf != nullptr) {
|
||||||
|
std::memcpy(buf, data, size);
|
||||||
|
return try_push(static_cast<std::int32_t>(size) -
|
||||||
|
static_cast<std::int32_t>(ipc::data_length), &(dat.first), 0);
|
||||||
|
}
|
||||||
|
// try using message fragment
|
||||||
|
//ipc::log("fail: shm::handle for big message. msg_id: %zd, size: %zd\n", msg_id, size);
|
||||||
|
}
|
||||||
|
// push message fragment
|
||||||
|
std::int32_t offset = 0;
|
||||||
|
for (std::int32_t i = 0; i < static_cast<std::int32_t>(size / ipc::data_length); ++i, offset += ipc::data_length) {
|
||||||
|
if (!try_push(static_cast<std::int32_t>(size) - offset - static_cast<std::int32_t>(ipc::data_length),
|
||||||
|
static_cast<ipc::byte_t const *>(data) + offset, ipc::data_length)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if remain > 0, this is the last message fragment
|
||||||
|
std::int32_t remain = static_cast<std::int32_t>(size) - offset;
|
||||||
|
if (remain > 0) {
|
||||||
|
if (!try_push(remain - static_cast<std::int32_t>(ipc::data_length),
|
||||||
|
static_cast<ipc::byte_t const *>(data) + offset,
|
||||||
|
static_cast<std::size_t>(remain))) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
|
||||||
|
return send([tm](auto info, auto que, auto msg_id) {
|
||||||
|
return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) {
|
||||||
|
if (!wait_for(info->wt_waiter_, [&] {
|
||||||
|
return !que->push(
|
||||||
|
[](void*) { return true; },
|
||||||
|
info->cc_id_, msg_id, remain, data, size);
|
||||||
|
}, tm)) {
|
||||||
|
ipc::log("force_push: msg_id = %zd, remain = %d, size = %zd\n", msg_id, remain, size);
|
||||||
|
if (!que->force_push(
|
||||||
|
clear_message<typename queue_t::value_t>,
|
||||||
|
info->cc_id_, msg_id, remain, data, size)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
info->rd_waiter_.broadcast();
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
}, h, data, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
|
||||||
|
return send([tm](auto info, auto que, auto msg_id) {
|
||||||
|
return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) {
|
||||||
|
if (!wait_for(info->wt_waiter_, [&] {
|
||||||
|
return !que->push(
|
||||||
|
[](void*) { return true; },
|
||||||
|
info->cc_id_, msg_id, remain, data, size);
|
||||||
|
}, tm)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
info->rd_waiter_.broadcast();
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
}, h, data, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
static ipc::buff_t recv(ipc::handle_t h, std::uint64_t tm) {
|
||||||
|
auto que = queue_of(h);
|
||||||
|
if (que == nullptr) {
|
||||||
|
ipc::error("fail: recv, queue_of(h) == nullptr\n");
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
if (!que->connected()) {
|
||||||
|
// hasn't connected yet, just return.
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
auto& rc = info_of(h)->recv_cache();
|
||||||
|
for (;;) {
|
||||||
|
// pop a new message
|
||||||
|
typename queue_t::value_t msg;
|
||||||
|
if (!wait_for(info_of(h)->rd_waiter_, [que, &msg] {
|
||||||
|
return !que->pop(msg);
|
||||||
|
}, tm)) {
|
||||||
|
// pop failed, just return.
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
info_of(h)->wt_waiter_.broadcast();
|
||||||
|
if ((info_of(h)->acc() != nullptr) && (msg.cc_id_ == info_of(h)->cc_id_)) {
|
||||||
|
continue; // ignore message to self
|
||||||
|
}
|
||||||
|
// msg.remain_ may minus & abs(msg.remain_) < data_length
|
||||||
|
std::int32_t r_size = static_cast<std::int32_t>(ipc::data_length) + msg.remain_;
|
||||||
|
if (r_size <= 0) {
|
||||||
|
ipc::error("fail: recv, r_size = %d\n", (int)r_size);
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
std::size_t msg_size = static_cast<std::size_t>(r_size);
|
||||||
|
// large message
|
||||||
|
if (msg.storage_) {
|
||||||
|
ipc::storage_id_t buf_id = *reinterpret_cast<ipc::storage_id_t*>(&msg.data_);
|
||||||
|
void* buf = find_storage(buf_id, msg_size);
|
||||||
|
if (buf != nullptr) {
|
||||||
|
struct recycle_t {
|
||||||
|
ipc::storage_id_t storage_id;
|
||||||
|
ipc::circ::cc_t curr_conns;
|
||||||
|
ipc::circ::cc_t conn_id;
|
||||||
|
} *r_info = ipc::mem::alloc<recycle_t>(recycle_t{
|
||||||
|
buf_id, que->elems()->connections(std::memory_order_relaxed), que->connected_id()
|
||||||
|
});
|
||||||
|
if (r_info == nullptr) {
|
||||||
|
ipc::log("fail: ipc::mem::alloc<recycle_t>.\n");
|
||||||
|
return ipc::buff_t{buf, msg_size}; // no recycle
|
||||||
|
} else {
|
||||||
|
return ipc::buff_t{buf, msg_size, [](void* p_info, std::size_t size) {
|
||||||
|
auto r_info = static_cast<recycle_t *>(p_info);
|
||||||
|
IPC_UNUSED_ auto finally = ipc::guard([r_info] {
|
||||||
|
ipc::mem::free(r_info);
|
||||||
|
});
|
||||||
|
recycle_storage<flag_t>(r_info->storage_id, size, r_info->curr_conns, r_info->conn_id);
|
||||||
|
}, r_info};
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ipc::log("fail: shm::handle for large message. msg_id: %zd, buf_id: %zd, size: %zd\n", msg.id_, buf_id, msg_size);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// find cache with msg.id_
|
||||||
|
auto cac_it = rc.find(msg.id_);
|
||||||
|
if (cac_it == rc.end()) {
|
||||||
|
if (msg_size <= ipc::data_length) {
|
||||||
|
return make_cache(msg.data_, msg_size);
|
||||||
|
}
|
||||||
|
// gc
|
||||||
|
if (rc.size() > 1024) {
|
||||||
|
std::vector<msg_id_t> need_del;
|
||||||
|
for (auto const & pair : rc) {
|
||||||
|
auto cmp = std::minmax(msg.id_, pair.first);
|
||||||
|
if (cmp.second - cmp.first > 8192) {
|
||||||
|
need_del.push_back(pair.first);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (auto id : need_del) rc.erase(id);
|
||||||
|
}
|
||||||
|
// cache the first message fragment
|
||||||
|
rc.emplace(msg.id_, cache_t { ipc::data_length, make_cache(msg.data_, msg_size) });
|
||||||
|
}
|
||||||
|
// has cached before this message
|
||||||
|
else {
|
||||||
|
auto& cac = cac_it->second;
|
||||||
|
// this is the last message fragment
|
||||||
|
if (msg.remain_ <= 0) {
|
||||||
|
cac.append(&(msg.data_), msg_size);
|
||||||
|
// finish this message, erase it from cache
|
||||||
|
auto buff = std::move(cac.buff_);
|
||||||
|
rc.erase(cac_it);
|
||||||
|
return buff;
|
||||||
|
}
|
||||||
|
// there are remain datas after this message
|
||||||
|
cac.append(&(msg.data_), ipc::data_length);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static ipc::buff_t try_recv(ipc::handle_t h) {
|
||||||
|
return recv(h, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
}; // detail_impl<Policy>
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
using policy_t = ipc::policy::choose<ipc::circ::elem_array, Flag>;
|
||||||
|
|
||||||
|
} // internal-linkage
|
||||||
|
|
||||||
|
namespace ipc {
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
ipc::handle_t chan_impl<Flag>::inited() {
|
||||||
|
ipc::detail::waiter::init();
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
bool chan_impl<Flag>::connect(ipc::handle_t * ph, char const * name, unsigned mode) {
|
||||||
|
return detail_impl<policy_t<Flag>>::connect(ph, name, mode & receiver);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
bool chan_impl<Flag>::reconnect(ipc::handle_t * ph, unsigned mode) {
|
||||||
|
return detail_impl<policy_t<Flag>>::reconnect(ph, mode & receiver);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
void chan_impl<Flag>::disconnect(ipc::handle_t h) {
|
||||||
|
detail_impl<policy_t<Flag>>::disconnect(h);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
void chan_impl<Flag>::destroy(ipc::handle_t h) {
|
||||||
|
detail_impl<policy_t<Flag>>::destroy(h);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
char const * chan_impl<Flag>::name(ipc::handle_t h) {
|
||||||
|
auto info = detail_impl<policy_t<Flag>>::info_of(h);
|
||||||
|
return (info == nullptr) ? nullptr : info->name_.c_str();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
std::size_t chan_impl<Flag>::recv_count(ipc::handle_t h) {
|
||||||
|
return detail_impl<policy_t<Flag>>::recv_count(h);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
bool chan_impl<Flag>::wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) {
|
||||||
|
return detail_impl<policy_t<Flag>>::wait_for_recv(h, r_count, tm);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
bool chan_impl<Flag>::send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
|
||||||
|
return detail_impl<policy_t<Flag>>::send(h, data, size, tm);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
buff_t chan_impl<Flag>::recv(ipc::handle_t h, std::uint64_t tm) {
|
||||||
|
return detail_impl<policy_t<Flag>>::recv(h, tm);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
bool chan_impl<Flag>::try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
|
||||||
|
return detail_impl<policy_t<Flag>>::try_send(h, data, size, tm);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
buff_t chan_impl<Flag>::try_recv(ipc::handle_t h) {
|
||||||
|
return detail_impl<policy_t<Flag>>::try_recv(h);
|
||||||
|
}
|
||||||
|
|
||||||
|
template struct chan_impl<ipc::wr<relat::single, relat::single, trans::unicast >>;
|
||||||
|
// template struct chan_impl<ipc::wr<relat::single, relat::multi , trans::unicast >>; // TBD
|
||||||
|
// template struct chan_impl<ipc::wr<relat::multi , relat::multi , trans::unicast >>; // TBD
|
||||||
|
template struct chan_impl<ipc::wr<relat::single, relat::multi , trans::broadcast>>;
|
||||||
|
template struct chan_impl<ipc::wr<relat::multi , relat::multi , trans::broadcast>>;
|
||||||
|
|
||||||
|
} // namespace ipc
|
||||||
@@ -0,0 +1,25 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <type_traits>
|
||||||
|
|
||||||
|
#include "libipc/def.h"
|
||||||
|
#include "libipc/prod_cons.h"
|
||||||
|
|
||||||
|
#include "libipc/circ/elem_array.h"
|
||||||
|
|
||||||
|
namespace ipc {
|
||||||
|
namespace policy {
|
||||||
|
|
||||||
|
template <template <typename, std::size_t...> class Elems, typename Flag>
|
||||||
|
struct choose;
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
struct choose<circ::elem_array, Flag> {
|
||||||
|
using flag_t = Flag;
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
using elems_t = circ::elem_array<ipc::prod_cons_impl<flag_t>, DataSize, AlignSize>;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace policy
|
||||||
|
} // namespace ipc
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
#include "libipc/pool_alloc.h"
|
||||||
|
|
||||||
|
#include "libipc/memory/resource.h"
|
||||||
|
|
||||||
|
namespace ipc {
|
||||||
|
namespace mem {
|
||||||
|
|
||||||
|
void* pool_alloc::alloc(std::size_t size) {
|
||||||
|
return async_pool_alloc::alloc(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void pool_alloc::free(void* p, std::size_t size) {
|
||||||
|
async_pool_alloc::free(p, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace mem
|
||||||
|
} // namespace ipc
|
||||||
@@ -0,0 +1,433 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
|
#include <utility>
|
||||||
|
#include <cstring>
|
||||||
|
#include <type_traits>
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
#include "libipc/def.h"
|
||||||
|
|
||||||
|
#include "libipc/platform/detail.h"
|
||||||
|
#include "libipc/circ/elem_def.h"
|
||||||
|
#include "libipc/utility/log.h"
|
||||||
|
#include "libipc/utility/utility.h"
|
||||||
|
|
||||||
|
namespace ipc {
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////
|
||||||
|
/// producer-consumer implementation
|
||||||
|
////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
struct prod_cons_impl;
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct elem_t {
|
||||||
|
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||||
|
};
|
||||||
|
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
|
||||||
|
|
||||||
|
constexpr circ::u2_t cursor() const noexcept {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool push(W* /*wrapper*/, F&& f, E* elems) {
|
||||||
|
auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
|
||||||
|
if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
|
||||||
|
return false; // full
|
||||||
|
}
|
||||||
|
std::forward<F>(f)(&(elems[cur_wt].data_));
|
||||||
|
wt_.fetch_add(1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
|
||||||
|
* So we could just disconnect all connections of receiver, and return false.
|
||||||
|
*/
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&&, E*) {
|
||||||
|
wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R, typename E>
|
||||||
|
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
|
||||||
|
auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
|
||||||
|
if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
|
||||||
|
return false; // empty
|
||||||
|
}
|
||||||
|
std::forward<F>(f)(&(elems[cur_rd].data_));
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
rd_.fetch_add(1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
|
||||||
|
: prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&&, E*) {
|
||||||
|
wrapper->elems()->disconnect_receiver(1);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R,
|
||||||
|
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
|
||||||
|
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
|
||||||
|
byte_t buff[DS];
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
auto cur_rd = rd_.load(std::memory_order_relaxed);
|
||||||
|
if (circ::index_of(cur_rd) ==
|
||||||
|
circ::index_of(wt_.load(std::memory_order_acquire))) {
|
||||||
|
return false; // empty
|
||||||
|
}
|
||||||
|
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
|
||||||
|
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
|
||||||
|
std::forward<F>(f)(buff);
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
|
||||||
|
: prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {
|
||||||
|
|
||||||
|
using flag_t = std::uint64_t;
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct elem_t {
|
||||||
|
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||||
|
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
|
||||||
|
};
|
||||||
|
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool push(W* /*wrapper*/, F&& f, E* elems) {
|
||||||
|
circ::u2_t cur_ct, nxt_ct;
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
cur_ct = ct_.load(std::memory_order_relaxed);
|
||||||
|
if (circ::index_of(nxt_ct = cur_ct + 1) ==
|
||||||
|
circ::index_of(rd_.load(std::memory_order_acquire))) {
|
||||||
|
return false; // full
|
||||||
|
}
|
||||||
|
if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
auto* el = elems + circ::index_of(cur_ct);
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
// set flag & try update wt
|
||||||
|
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
||||||
|
while (1) {
|
||||||
|
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
|
||||||
|
if (cur_ct != wt_.load(std::memory_order_relaxed)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if ((~cac_ct) != cur_ct) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
wt_.store(nxt_ct, std::memory_order_release);
|
||||||
|
cur_ct = nxt_ct;
|
||||||
|
nxt_ct = cur_ct + 1;
|
||||||
|
el = elems + circ::index_of(cur_ct);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&&, E*) {
|
||||||
|
wrapper->elems()->disconnect_receiver(1);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R,
|
||||||
|
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
|
||||||
|
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
|
||||||
|
byte_t buff[DS];
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
auto cur_rd = rd_.load(std::memory_order_relaxed);
|
||||||
|
auto cur_wt = wt_.load(std::memory_order_acquire);
|
||||||
|
auto id_rd = circ::index_of(cur_rd);
|
||||||
|
auto id_wt = circ::index_of(cur_wt);
|
||||||
|
if (id_rd == id_wt) {
|
||||||
|
auto* el = elems + id_wt;
|
||||||
|
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
|
||||||
|
if ((~cac_ct) != cur_wt) {
|
||||||
|
return false; // empty
|
||||||
|
}
|
||||||
|
if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
|
||||||
|
wt_.store(cur_wt + 1, std::memory_order_release);
|
||||||
|
}
|
||||||
|
k = 0;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
|
||||||
|
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
|
||||||
|
std::forward<F>(f)(buff);
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {
|
||||||
|
|
||||||
|
using rc_t = std::uint64_t;
|
||||||
|
|
||||||
|
enum : rc_t {
|
||||||
|
ep_mask = 0x00000000ffffffffull,
|
||||||
|
ep_incr = 0x0000000100000000ull
|
||||||
|
};
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct elem_t {
|
||||||
|
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||||
|
std::atomic<rc_t> rc_ { 0 }; // read-counter
|
||||||
|
};
|
||||||
|
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
|
||||||
|
alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer
|
||||||
|
|
||||||
|
circ::u2_t cursor() const noexcept {
|
||||||
|
return wt_.load(std::memory_order_acquire);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool push(W* wrapper, F&& f, E* elems) {
|
||||||
|
E* el;
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
|
||||||
|
// check all consumers have finished reading this element
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
circ::cc_t rem_cc = cur_rc & ep_mask;
|
||||||
|
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
|
||||||
|
return false; // has not finished yet
|
||||||
|
}
|
||||||
|
// consider rem_cc to be 0 here
|
||||||
|
if (el->rc_.compare_exchange_weak(
|
||||||
|
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
wt_.fetch_add(1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&& f, E* elems) {
|
||||||
|
E* el;
|
||||||
|
epoch_ += ep_incr;
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
|
||||||
|
// check all consumers have finished reading this element
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
circ::cc_t rem_cc = cur_rc & ep_mask;
|
||||||
|
if (cc & rem_cc) {
|
||||||
|
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
|
||||||
|
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
}
|
||||||
|
// just compare & exchange
|
||||||
|
if (el->rc_.compare_exchange_weak(
|
||||||
|
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
wt_.fetch_add(1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R, typename E>
|
||||||
|
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
|
||||||
|
if (cur == cursor()) return false; // acquire
|
||||||
|
auto* el = elems + circ::index_of(cur++);
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
if ((cur_rc & ep_mask) == 0) {
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
|
||||||
|
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
|
||||||
|
std::forward<R>(out)((nxt_rc & ep_mask) == 0);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {
|
||||||
|
|
||||||
|
using rc_t = std::uint64_t;
|
||||||
|
using flag_t = std::uint64_t;
|
||||||
|
|
||||||
|
enum : rc_t {
|
||||||
|
rc_mask = 0x00000000ffffffffull,
|
||||||
|
ep_mask = 0x00ffffffffffffffull,
|
||||||
|
ep_incr = 0x0100000000000000ull,
|
||||||
|
ic_mask = 0xff000000ffffffffull,
|
||||||
|
ic_incr = 0x0000000100000000ull
|
||||||
|
};
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct elem_t {
|
||||||
|
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||||
|
std::atomic<rc_t > rc_ { 0 }; // read-counter
|
||||||
|
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
|
||||||
|
};
|
||||||
|
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
|
||||||
|
alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };
|
||||||
|
|
||||||
|
circ::u2_t cursor() const noexcept {
|
||||||
|
return ct_.load(std::memory_order_acquire);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr static rc_t inc_rc(rc_t rc) noexcept {
|
||||||
|
return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr static rc_t inc_mask(rc_t rc) noexcept {
|
||||||
|
return inc_rc(rc) & ~rc_mask;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool push(W* wrapper, F&& f, E* elems) {
|
||||||
|
E* el;
|
||||||
|
circ::u2_t cur_ct;
|
||||||
|
rc_t epoch = epoch_.load(std::memory_order_acquire);
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
|
||||||
|
// check all consumers have finished reading this element
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_relaxed);
|
||||||
|
circ::cc_t rem_cc = cur_rc & rc_mask;
|
||||||
|
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
|
||||||
|
return false; // has not finished yet
|
||||||
|
}
|
||||||
|
else if (!rem_cc) {
|
||||||
|
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
|
||||||
|
if ((cur_fl != cur_ct) && cur_fl) {
|
||||||
|
return false; // full
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// consider rem_cc to be 0 here
|
||||||
|
if (el->rc_.compare_exchange_weak(
|
||||||
|
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
|
||||||
|
epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
// only one thread/process would touch here at one time
|
||||||
|
ct_.store(cur_ct + 1, std::memory_order_release);
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
// set flag & try update wt
|
||||||
|
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&& f, E* elems) {
|
||||||
|
E* el;
|
||||||
|
circ::u2_t cur_ct;
|
||||||
|
rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
|
||||||
|
// check all consumers have finished reading this element
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
circ::cc_t rem_cc = cur_rc & rc_mask;
|
||||||
|
if (cc & rem_cc) {
|
||||||
|
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
|
||||||
|
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
}
|
||||||
|
// just compare & exchange
|
||||||
|
if (el->rc_.compare_exchange_weak(
|
||||||
|
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
|
||||||
|
if (epoch == epoch_.load(std::memory_order_acquire)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
else if (push(wrapper, std::forward<F>(f), elems)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
// only one thread/process would touch here at one time
|
||||||
|
ct_.store(cur_ct + 1, std::memory_order_release);
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
// set flag & try update wt
|
||||||
|
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R, typename E, std::size_t N>
|
||||||
|
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
|
||||||
|
auto* el = elems + circ::index_of(cur);
|
||||||
|
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
|
||||||
|
if (cur_fl != ~static_cast<flag_t>(cur)) {
|
||||||
|
return false; // empty
|
||||||
|
}
|
||||||
|
++cur;
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
if ((cur_rc & rc_mask) == 0) {
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
el->f_ct_.store(cur + N - 1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
|
||||||
|
bool last_one = false;
|
||||||
|
if ((last_one = (nxt_rc & rc_mask) == 0)) {
|
||||||
|
el->f_ct_.store(cur + N - 1, std::memory_order_release);
|
||||||
|
}
|
||||||
|
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
|
||||||
|
std::forward<R>(out)(last_one);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace ipc
|
||||||
@@ -0,0 +1,216 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <type_traits>
|
||||||
|
#include <new>
|
||||||
|
#include <utility> // [[since C++14]]: std::exchange
|
||||||
|
#include <algorithm>
|
||||||
|
#include <atomic>
|
||||||
|
#include <tuple>
|
||||||
|
#include <thread>
|
||||||
|
#include <chrono>
|
||||||
|
#include <string>
|
||||||
|
#include <cassert> // assert
|
||||||
|
|
||||||
|
#include "libipc/def.h"
|
||||||
|
#include "libipc/shm.h"
|
||||||
|
#include "libipc/rw_lock.h"
|
||||||
|
|
||||||
|
#include "libipc/utility/log.h"
|
||||||
|
#include "libipc/platform/detail.h"
|
||||||
|
#include "libipc/circ/elem_def.h"
|
||||||
|
|
||||||
|
namespace ipc {
|
||||||
|
namespace detail {
|
||||||
|
|
||||||
|
class queue_conn {
|
||||||
|
protected:
|
||||||
|
circ::cc_t connected_ = 0;
|
||||||
|
shm::handle elems_h_;
|
||||||
|
|
||||||
|
template <typename Elems>
|
||||||
|
Elems* open(char const * name) {
|
||||||
|
if (name == nullptr || name[0] == '\0') {
|
||||||
|
ipc::error("fail open waiter: name is empty!\n");
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
if (!elems_h_.acquire(name, sizeof(Elems))) {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
auto elems = static_cast<Elems*>(elems_h_.get());
|
||||||
|
if (elems == nullptr) {
|
||||||
|
ipc::error("fail acquire elems: %s\n", name);
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
elems->init();
|
||||||
|
return elems;
|
||||||
|
}
|
||||||
|
|
||||||
|
void close() {
|
||||||
|
elems_h_.release();
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
queue_conn() = default;
|
||||||
|
queue_conn(const queue_conn&) = delete;
|
||||||
|
queue_conn& operator=(const queue_conn&) = delete;
|
||||||
|
|
||||||
|
bool connected() const noexcept {
|
||||||
|
return connected_ != 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
circ::cc_t connected_id() const noexcept {
|
||||||
|
return connected_;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Elems>
|
||||||
|
auto connect(Elems* elems) noexcept
|
||||||
|
/*needs 'optional' here*/
|
||||||
|
-> std::tuple<bool, bool, decltype(std::declval<Elems>().cursor())> {
|
||||||
|
if (elems == nullptr) return {};
|
||||||
|
// if it's already connected, just return
|
||||||
|
if (connected()) return {connected(), false, 0};
|
||||||
|
connected_ = elems->connect_receiver();
|
||||||
|
return {connected(), true, elems->cursor()};
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Elems>
|
||||||
|
bool disconnect(Elems* elems) noexcept {
|
||||||
|
if (elems == nullptr) return false;
|
||||||
|
// if it's already disconnected, just return false
|
||||||
|
if (!connected()) return false;
|
||||||
|
elems->disconnect_receiver(std::exchange(connected_, 0));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename Elems>
|
||||||
|
class queue_base : public queue_conn {
|
||||||
|
using base_t = queue_conn;
|
||||||
|
|
||||||
|
public:
|
||||||
|
using elems_t = Elems;
|
||||||
|
using policy_t = typename elems_t::policy_t;
|
||||||
|
|
||||||
|
protected:
|
||||||
|
elems_t * elems_ = nullptr;
|
||||||
|
decltype(std::declval<elems_t>().cursor()) cursor_ = 0;
|
||||||
|
bool sender_flag_ = false;
|
||||||
|
|
||||||
|
public:
|
||||||
|
using base_t::base_t;
|
||||||
|
|
||||||
|
queue_base() = default;
|
||||||
|
|
||||||
|
explicit queue_base(char const * name)
|
||||||
|
: queue_base{} {
|
||||||
|
elems_ = open<elems_t>(name);
|
||||||
|
}
|
||||||
|
|
||||||
|
explicit queue_base(elems_t * elems) noexcept
|
||||||
|
: queue_base{} {
|
||||||
|
assert(elems != nullptr);
|
||||||
|
elems_ = elems;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* not virtual */ ~queue_base() {
|
||||||
|
base_t::close();
|
||||||
|
}
|
||||||
|
|
||||||
|
elems_t * elems() noexcept { return elems_; }
|
||||||
|
elems_t const * elems() const noexcept { return elems_; }
|
||||||
|
|
||||||
|
bool ready_sending() noexcept {
|
||||||
|
if (elems_ == nullptr) return false;
|
||||||
|
return sender_flag_ || (sender_flag_ = elems_->connect_sender());
|
||||||
|
}
|
||||||
|
|
||||||
|
void shut_sending() noexcept {
|
||||||
|
if (elems_ == nullptr) return;
|
||||||
|
if (!sender_flag_) return;
|
||||||
|
elems_->disconnect_sender();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool connect() noexcept {
|
||||||
|
auto tp = base_t::connect(elems_);
|
||||||
|
if (std::get<0>(tp) && std::get<1>(tp)) {
|
||||||
|
cursor_ = std::get<2>(tp);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return std::get<0>(tp);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool disconnect() noexcept {
|
||||||
|
return base_t::disconnect(elems_);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::size_t conn_count() const noexcept {
|
||||||
|
return (elems_ == nullptr) ? static_cast<std::size_t>(invalid_value) : elems_->conn_count();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool valid() const noexcept {
|
||||||
|
return elems_ != nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool empty() const noexcept {
|
||||||
|
return !valid() || (cursor_ == elems_->cursor());
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename F, typename... P>
|
||||||
|
bool push(F&& prep, P&&... params) {
|
||||||
|
if (elems_ == nullptr) return false;
|
||||||
|
return elems_->push(this, [&](void* p) {
|
||||||
|
if (prep(p)) ::new (p) T(std::forward<P>(params)...);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename F, typename... P>
|
||||||
|
bool force_push(F&& prep, P&&... params) {
|
||||||
|
if (elems_ == nullptr) return false;
|
||||||
|
return elems_->force_push(this, [&](void* p) {
|
||||||
|
if (prep(p)) ::new (p) T(std::forward<P>(params)...);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename F>
|
||||||
|
bool pop(T& item, F&& out) {
|
||||||
|
if (elems_ == nullptr) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return elems_->pop(this, &(this->cursor_), [&item](void* p) {
|
||||||
|
::new (&item) T(std::move(*static_cast<T*>(p)));
|
||||||
|
}, std::forward<F>(out));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace detail
|
||||||
|
|
||||||
|
template <typename T, typename Policy>
|
||||||
|
class queue final : public detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>> {
|
||||||
|
using base_t = detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>>;
|
||||||
|
|
||||||
|
public:
|
||||||
|
using value_t = T;
|
||||||
|
|
||||||
|
using base_t::base_t;
|
||||||
|
|
||||||
|
template <typename... P>
|
||||||
|
bool push(P&&... params) {
|
||||||
|
return base_t::template push<T>(std::forward<P>(params)...);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename... P>
|
||||||
|
bool force_push(P&&... params) {
|
||||||
|
return base_t::template force_push<T>(std::forward<P>(params)...);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool pop(T& item) {
|
||||||
|
return base_t::pop(item, [](bool) {});
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename F>
|
||||||
|
bool pop(T& item, F&& out) {
|
||||||
|
return base_t::pop(item, std::forward<F>(out));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace ipc
|
||||||
@@ -0,0 +1,103 @@
|
|||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <utility>
|
||||||
|
|
||||||
|
#include "libipc/shm.h"
|
||||||
|
|
||||||
|
#include "libipc/utility/pimpl.h"
|
||||||
|
#include "libipc/memory/resource.h"
|
||||||
|
|
||||||
|
namespace ipc {
|
||||||
|
namespace shm {
|
||||||
|
|
||||||
|
class handle::handle_ : public pimpl<handle_> {
|
||||||
|
public:
|
||||||
|
shm::id_t id_ = nullptr;
|
||||||
|
void* m_ = nullptr;
|
||||||
|
|
||||||
|
ipc::string n_;
|
||||||
|
std::size_t s_ = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
handle::handle()
|
||||||
|
: p_(p_->make()) {
|
||||||
|
}
|
||||||
|
|
||||||
|
handle::handle(char const * name, std::size_t size, unsigned mode)
|
||||||
|
: handle() {
|
||||||
|
acquire(name, size, mode);
|
||||||
|
}
|
||||||
|
|
||||||
|
handle::handle(handle&& rhs)
|
||||||
|
: handle() {
|
||||||
|
swap(rhs);
|
||||||
|
}
|
||||||
|
|
||||||
|
handle::~handle() {
|
||||||
|
release();
|
||||||
|
p_->clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
void handle::swap(handle& rhs) {
|
||||||
|
std::swap(p_, rhs.p_);
|
||||||
|
}
|
||||||
|
|
||||||
|
handle& handle::operator=(handle rhs) {
|
||||||
|
swap(rhs);
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool handle::valid() const noexcept {
|
||||||
|
return impl(p_)->m_ != nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::size_t handle::size() const noexcept {
|
||||||
|
return impl(p_)->s_;
|
||||||
|
}
|
||||||
|
|
||||||
|
char const * handle::name() const noexcept {
|
||||||
|
return impl(p_)->n_.c_str();
|
||||||
|
}
|
||||||
|
|
||||||
|
std::int32_t handle::ref() const noexcept {
|
||||||
|
return shm::get_ref(impl(p_)->id_);
|
||||||
|
}
|
||||||
|
|
||||||
|
void handle::sub_ref() noexcept {
|
||||||
|
shm::sub_ref(impl(p_)->id_);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool handle::acquire(char const * name, std::size_t size, unsigned mode) {
|
||||||
|
release();
|
||||||
|
impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), size, mode);
|
||||||
|
impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
|
||||||
|
return valid();
|
||||||
|
}
|
||||||
|
|
||||||
|
std::int32_t handle::release() {
|
||||||
|
if (impl(p_)->id_ == nullptr) return -1;
|
||||||
|
return shm::release(detach());
|
||||||
|
}
|
||||||
|
|
||||||
|
void* handle::get() const {
|
||||||
|
return impl(p_)->m_;
|
||||||
|
}
|
||||||
|
|
||||||
|
void handle::attach(id_t id) {
|
||||||
|
if (id == nullptr) return;
|
||||||
|
release();
|
||||||
|
impl(p_)->id_ = id;
|
||||||
|
impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
|
||||||
|
}
|
||||||
|
|
||||||
|
id_t handle::detach() {
|
||||||
|
auto old = impl(p_)->id_;
|
||||||
|
impl(p_)->id_ = nullptr;
|
||||||
|
impl(p_)->m_ = nullptr;
|
||||||
|
impl(p_)->s_ = 0;
|
||||||
|
impl(p_)->n_.clear();
|
||||||
|
return old;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace shm
|
||||||
|
} // namespace ipc
|
||||||
@@ -0,0 +1,83 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <utility>
|
||||||
|
#include <string>
|
||||||
|
#include <mutex>
|
||||||
|
#include <atomic>
|
||||||
|
|
||||||
|
#include "libipc/def.h"
|
||||||
|
#include "libipc/mutex.h"
|
||||||
|
#include "libipc/condition.h"
|
||||||
|
#include "libipc/platform/detail.h"
|
||||||
|
|
||||||
|
namespace ipc {
|
||||||
|
namespace detail {
|
||||||
|
|
||||||
|
class waiter {
|
||||||
|
ipc::sync::condition cond_;
|
||||||
|
ipc::sync::mutex lock_;
|
||||||
|
std::atomic<bool> quit_ {false};
|
||||||
|
|
||||||
|
public:
|
||||||
|
static void init();
|
||||||
|
|
||||||
|
waiter() = default;
|
||||||
|
waiter(char const *name) {
|
||||||
|
open(name);
|
||||||
|
}
|
||||||
|
|
||||||
|
~waiter() {
|
||||||
|
close();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool valid() const noexcept {
|
||||||
|
return cond_.valid() && lock_.valid();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool open(char const *name) noexcept {
|
||||||
|
quit_.store(false, std::memory_order_relaxed);
|
||||||
|
if (!cond_.open((std::string{"_waiter_cond_"} + name).c_str())) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (!lock_.open((std::string{"_waiter_lock_"} + name).c_str())) {
|
||||||
|
cond_.close();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return valid();
|
||||||
|
}
|
||||||
|
|
||||||
|
void close() noexcept {
|
||||||
|
cond_.close();
|
||||||
|
lock_.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename F>
|
||||||
|
bool wait_if(F &&pred, std::uint64_t tm = ipc::invalid_value) noexcept {
|
||||||
|
IPC_UNUSED_ std::lock_guard<ipc::sync::mutex> guard {lock_};
|
||||||
|
while ([this, &pred] {
|
||||||
|
return !quit_.load(std::memory_order_relaxed)
|
||||||
|
&& std::forward<F>(pred)();
|
||||||
|
}()) {
|
||||||
|
if (!cond_.wait(lock_, tm)) return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool notify() noexcept {
|
||||||
|
std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
|
||||||
|
return cond_.notify(lock_);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool broadcast() noexcept {
|
||||||
|
std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
|
||||||
|
return cond_.broadcast(lock_);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool quit_waiting() {
|
||||||
|
quit_.store(true, std::memory_order_release);
|
||||||
|
return broadcast();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace detail
|
||||||
|
} // namespace ipc
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
https://github.com/mutouyun/cpp-ipc
|
||||||
|
|
||||||
|
A high-performance inter-process communication library using shared memory on Linux/Windows.
|
||||||
文件差异内容过多而无法显示
加载差异
@@ -0,0 +1,316 @@
|
|||||||
|
// jpgd.h - C++ class for JPEG decompression.
|
||||||
|
// Public domain, Rich Geldreich <richgel99@gmail.com>
|
||||||
|
#ifndef JPEG_DECODER_H
|
||||||
|
#define JPEG_DECODER_H
|
||||||
|
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <setjmp.h>
|
||||||
|
|
||||||
|
namespace jpgd
|
||||||
|
{
|
||||||
|
typedef unsigned char uint8;
|
||||||
|
typedef signed short int16;
|
||||||
|
typedef unsigned short uint16;
|
||||||
|
typedef unsigned int uint;
|
||||||
|
typedef signed int int32;
|
||||||
|
|
||||||
|
// Loads a JPEG image from a memory buffer or a file.
|
||||||
|
// req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA).
|
||||||
|
// On return, width/height will be set to the image's dimensions, and actual_comps will be set to the either 1 (grayscale) or 3 (RGB).
|
||||||
|
// Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly.
|
||||||
|
// Requesting a 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp.
|
||||||
|
// BEGIN EPIC MOD
|
||||||
|
//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps);
|
||||||
|
unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format);
|
||||||
|
// END EPIC MOD
|
||||||
|
unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps);
|
||||||
|
|
||||||
|
// Success/failure error codes.
|
||||||
|
enum jpgd_status
|
||||||
|
{
|
||||||
|
JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1,
|
||||||
|
JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE,
|
||||||
|
JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS,
|
||||||
|
JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH,
|
||||||
|
JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER,
|
||||||
|
JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS,
|
||||||
|
JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE,
|
||||||
|
JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR,
|
||||||
|
JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM
|
||||||
|
};
|
||||||
|
|
||||||
|
// Input stream interface.
|
||||||
|
// Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available.
|
||||||
|
// The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set.
|
||||||
|
// It the input stream contains data after the JPEG stream's EOI (end of image) marker it will probably be pulled into the internal buffer.
|
||||||
|
// Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding.
|
||||||
|
class jpeg_decoder_stream
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
jpeg_decoder_stream() { }
|
||||||
|
virtual ~jpeg_decoder_stream() { }
|
||||||
|
|
||||||
|
// The read() method is called when the internal input buffer is empty.
|
||||||
|
// Parameters:
|
||||||
|
// pBuf - input buffer
|
||||||
|
// max_bytes_to_read - maximum bytes that can be written to pBuf
|
||||||
|
// pEOF_flag - set this to true if at end of stream (no more bytes remaining)
|
||||||
|
// Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0).
|
||||||
|
// Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full.
|
||||||
|
virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
// stdio FILE stream class.
|
||||||
|
class jpeg_decoder_file_stream : public jpeg_decoder_stream
|
||||||
|
{
|
||||||
|
jpeg_decoder_file_stream(const jpeg_decoder_file_stream &);
|
||||||
|
jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &);
|
||||||
|
|
||||||
|
FILE *m_pFile;
|
||||||
|
bool m_eof_flag, m_error_flag;
|
||||||
|
|
||||||
|
public:
|
||||||
|
jpeg_decoder_file_stream();
|
||||||
|
virtual ~jpeg_decoder_file_stream();
|
||||||
|
|
||||||
|
bool open(const char *Pfilename);
|
||||||
|
void close();
|
||||||
|
|
||||||
|
virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Memory stream class.
|
||||||
|
class jpeg_decoder_mem_stream : public jpeg_decoder_stream
|
||||||
|
{
|
||||||
|
const uint8 *m_pSrc_data;
|
||||||
|
uint m_ofs, m_size;
|
||||||
|
|
||||||
|
public:
|
||||||
|
jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { }
|
||||||
|
jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { }
|
||||||
|
|
||||||
|
virtual ~jpeg_decoder_mem_stream() { }
|
||||||
|
|
||||||
|
bool open(const uint8 *pSrc_data, uint size);
|
||||||
|
void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; }
|
||||||
|
|
||||||
|
virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Loads JPEG file from a jpeg_decoder_stream.
|
||||||
|
unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps);
|
||||||
|
|
||||||
|
enum
|
||||||
|
{
|
||||||
|
JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4,
|
||||||
|
JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384
|
||||||
|
};
|
||||||
|
|
||||||
|
typedef int16 jpgd_quant_t;
|
||||||
|
typedef int16 jpgd_block_t;
|
||||||
|
|
||||||
|
class jpeg_decoder
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
// Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc.
|
||||||
|
// methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline.
|
||||||
|
jpeg_decoder(jpeg_decoder_stream *pStream);
|
||||||
|
|
||||||
|
~jpeg_decoder();
|
||||||
|
|
||||||
|
// Call this method after constructing the object to begin decompression.
|
||||||
|
// If JPGD_SUCCESS is returned you may then call decode() on each scanline.
|
||||||
|
int begin_decoding();
|
||||||
|
|
||||||
|
// Returns the next scan line.
|
||||||
|
// For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1).
|
||||||
|
// Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4).
|
||||||
|
// Returns JPGD_SUCCESS if a scan line has been returned.
|
||||||
|
// Returns JPGD_DONE if all scan lines have been returned.
|
||||||
|
// Returns JPGD_FAILED if an error occurred. Call get_error_code() for a more info.
|
||||||
|
int decode(const void** pScan_line, uint* pScan_line_len);
|
||||||
|
|
||||||
|
inline jpgd_status get_error_code() const { return m_error_code; }
|
||||||
|
|
||||||
|
inline int get_width() const { return m_image_x_size; }
|
||||||
|
inline int get_height() const { return m_image_y_size; }
|
||||||
|
|
||||||
|
inline int get_num_components() const { return m_comps_in_frame; }
|
||||||
|
|
||||||
|
inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; }
|
||||||
|
inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); }
|
||||||
|
|
||||||
|
// Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file).
|
||||||
|
inline int get_total_bytes_read() const { return m_total_bytes_read; }
|
||||||
|
|
||||||
|
private:
|
||||||
|
jpeg_decoder(const jpeg_decoder &);
|
||||||
|
jpeg_decoder &operator =(const jpeg_decoder &);
|
||||||
|
|
||||||
|
typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int);
|
||||||
|
|
||||||
|
struct huff_tables
|
||||||
|
{
|
||||||
|
bool ac_table;
|
||||||
|
uint look_up[256];
|
||||||
|
uint look_up2[256];
|
||||||
|
uint8 code_size[256];
|
||||||
|
uint tree[512];
|
||||||
|
};
|
||||||
|
|
||||||
|
struct coeff_buf
|
||||||
|
{
|
||||||
|
uint8 *pData;
|
||||||
|
int block_num_x, block_num_y;
|
||||||
|
int block_len_x, block_len_y;
|
||||||
|
int block_size;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct mem_block
|
||||||
|
{
|
||||||
|
mem_block *m_pNext;
|
||||||
|
size_t m_used_count;
|
||||||
|
size_t m_size;
|
||||||
|
char m_data[1];
|
||||||
|
};
|
||||||
|
|
||||||
|
jmp_buf m_jmp_state;
|
||||||
|
mem_block *m_pMem_blocks;
|
||||||
|
int m_image_x_size;
|
||||||
|
int m_image_y_size;
|
||||||
|
jpeg_decoder_stream *m_pStream;
|
||||||
|
int m_progressive_flag;
|
||||||
|
uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES];
|
||||||
|
uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size
|
||||||
|
uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size
|
||||||
|
jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables
|
||||||
|
int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported)
|
||||||
|
int m_comps_in_frame; // # of components in frame
|
||||||
|
int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor
|
||||||
|
int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor
|
||||||
|
int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector
|
||||||
|
int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID
|
||||||
|
int m_comp_h_blocks[JPGD_MAX_COMPONENTS];
|
||||||
|
int m_comp_v_blocks[JPGD_MAX_COMPONENTS];
|
||||||
|
int m_comps_in_scan; // # of components in scan
|
||||||
|
int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan
|
||||||
|
int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector
|
||||||
|
int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector
|
||||||
|
int m_spectral_start; // spectral selection start
|
||||||
|
int m_spectral_end; // spectral selection end
|
||||||
|
int m_successive_low; // successive approximation low
|
||||||
|
int m_successive_high; // successive approximation high
|
||||||
|
int m_max_mcu_x_size; // MCU's max. X size in pixels
|
||||||
|
int m_max_mcu_y_size; // MCU's max. Y size in pixels
|
||||||
|
int m_blocks_per_mcu;
|
||||||
|
int m_max_blocks_per_row;
|
||||||
|
int m_mcus_per_row, m_mcus_per_col;
|
||||||
|
int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU];
|
||||||
|
int m_total_lines_left; // total # lines left in image
|
||||||
|
int m_mcu_lines_left; // total # lines left in this MCU
|
||||||
|
int m_real_dest_bytes_per_scan_line;
|
||||||
|
int m_dest_bytes_per_scan_line; // rounded up
|
||||||
|
int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y)
|
||||||
|
huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES];
|
||||||
|
coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS];
|
||||||
|
coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS];
|
||||||
|
int m_eob_run;
|
||||||
|
int m_block_y_mcu[JPGD_MAX_COMPONENTS];
|
||||||
|
uint8* m_pIn_buf_ofs;
|
||||||
|
int m_in_buf_left;
|
||||||
|
int m_tem_flag;
|
||||||
|
bool m_eof_flag;
|
||||||
|
uint8 m_in_buf_pad_start[128];
|
||||||
|
uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128];
|
||||||
|
uint8 m_in_buf_pad_end[128];
|
||||||
|
int m_bits_left;
|
||||||
|
uint m_bit_buf;
|
||||||
|
int m_restart_interval;
|
||||||
|
int m_restarts_left;
|
||||||
|
int m_next_restart_num;
|
||||||
|
int m_max_mcus_per_row;
|
||||||
|
int m_max_blocks_per_mcu;
|
||||||
|
int m_expanded_blocks_per_mcu;
|
||||||
|
int m_expanded_blocks_per_row;
|
||||||
|
int m_expanded_blocks_per_component;
|
||||||
|
bool m_freq_domain_chroma_upsample;
|
||||||
|
int m_max_mcus_per_col;
|
||||||
|
uint m_last_dc_val[JPGD_MAX_COMPONENTS];
|
||||||
|
jpgd_block_t* m_pMCU_coefficients;
|
||||||
|
int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU];
|
||||||
|
uint8* m_pSample_buf;
|
||||||
|
int m_crr[256];
|
||||||
|
int m_cbb[256];
|
||||||
|
int m_crg[256];
|
||||||
|
int m_cbg[256];
|
||||||
|
uint8* m_pScan_line_0;
|
||||||
|
uint8* m_pScan_line_1;
|
||||||
|
jpgd_status m_error_code;
|
||||||
|
bool m_ready_flag;
|
||||||
|
int m_total_bytes_read;
|
||||||
|
|
||||||
|
void free_all_blocks();
|
||||||
|
// BEGIN EPIC MOD
|
||||||
|
UE_NORETURN void stop_decoding(jpgd_status status);
|
||||||
|
// END EPIC MOD
|
||||||
|
void *alloc(size_t n, bool zero = false);
|
||||||
|
void word_clear(void *p, uint16 c, uint n);
|
||||||
|
void prep_in_buffer();
|
||||||
|
void read_dht_marker();
|
||||||
|
void read_dqt_marker();
|
||||||
|
void read_sof_marker();
|
||||||
|
void skip_variable_marker();
|
||||||
|
void read_dri_marker();
|
||||||
|
void read_sos_marker();
|
||||||
|
int next_marker();
|
||||||
|
int process_markers();
|
||||||
|
void locate_soi_marker();
|
||||||
|
void locate_sof_marker();
|
||||||
|
int locate_sos_marker();
|
||||||
|
void init(jpeg_decoder_stream * pStream);
|
||||||
|
void create_look_ups();
|
||||||
|
void fix_in_buffer();
|
||||||
|
void transform_mcu(int mcu_row);
|
||||||
|
void transform_mcu_expand(int mcu_row);
|
||||||
|
coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y);
|
||||||
|
inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y);
|
||||||
|
void load_next_row();
|
||||||
|
void decode_next_row();
|
||||||
|
void make_huff_table(int index, huff_tables *pH);
|
||||||
|
void check_quant_tables();
|
||||||
|
void check_huff_tables();
|
||||||
|
void calc_mcu_block_order();
|
||||||
|
int init_scan();
|
||||||
|
void init_frame();
|
||||||
|
void process_restart();
|
||||||
|
void decode_scan(pDecode_block_func decode_block_func);
|
||||||
|
void init_progressive();
|
||||||
|
void init_sequential();
|
||||||
|
void decode_start();
|
||||||
|
void decode_init(jpeg_decoder_stream * pStream);
|
||||||
|
void H2V2Convert();
|
||||||
|
void H2V1Convert();
|
||||||
|
void H1V2Convert();
|
||||||
|
void H1V1Convert();
|
||||||
|
void gray_convert();
|
||||||
|
void expanded_convert();
|
||||||
|
void find_eoi();
|
||||||
|
inline uint get_char();
|
||||||
|
inline uint get_char(bool *pPadding_flag);
|
||||||
|
inline void stuff_char(uint8 q);
|
||||||
|
inline uint8 get_octet();
|
||||||
|
inline uint get_bits(int num_bits);
|
||||||
|
inline uint get_bits_no_markers(int numbits);
|
||||||
|
inline int huff_decode(huff_tables *pH);
|
||||||
|
inline int huff_decode(huff_tables *pH, int& extrabits);
|
||||||
|
static inline uint8 clamp(int i);
|
||||||
|
static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
|
||||||
|
static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
|
||||||
|
static void decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
|
||||||
|
static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace jpgd
|
||||||
|
|
||||||
|
#endif // JPEG_DECODER_H
|
||||||
文件差异内容过多而无法显示
加载差异
@@ -0,0 +1,172 @@
|
|||||||
|
|
||||||
|
// jpge.h - C++ class for JPEG compression.
|
||||||
|
// Public domain, Rich Geldreich <richgel99@gmail.com>
|
||||||
|
// Alex Evans: Added RGBA support, linear memory allocator.
|
||||||
|
#ifndef JPEG_ENCODER_H
|
||||||
|
#define JPEG_ENCODER_H
|
||||||
|
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
namespace jpge
|
||||||
|
{
|
||||||
|
typedef unsigned char uint8;
|
||||||
|
typedef signed short int16;
|
||||||
|
typedef signed int int32;
|
||||||
|
typedef unsigned short uint16;
|
||||||
|
typedef unsigned int uint32;
|
||||||
|
typedef unsigned int uint;
|
||||||
|
|
||||||
|
// JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common.
|
||||||
|
enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 };
|
||||||
|
|
||||||
|
// JPEG compression parameters structure.
|
||||||
|
struct params
|
||||||
|
{
|
||||||
|
inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false) { }
|
||||||
|
|
||||||
|
inline bool check_valid() const
|
||||||
|
{
|
||||||
|
if ((m_quality < 1) || (m_quality > 100)) return false;
|
||||||
|
if ((uint)m_subsampling > (uint)H2V2) return false;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Quality: 1-100, higher is better. Typical values are around 50-95.
|
||||||
|
int m_quality;
|
||||||
|
|
||||||
|
// m_subsampling:
|
||||||
|
// 0 = Y (grayscale) only
|
||||||
|
// 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU)
|
||||||
|
// 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU)
|
||||||
|
// 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU-- very common)
|
||||||
|
subsampling_t m_subsampling;
|
||||||
|
|
||||||
|
// Disables CbCr discrimination - only intended for testing.
|
||||||
|
// If true, the Y quantization table is also used for the CbCr channels.
|
||||||
|
bool m_no_chroma_discrim_flag;
|
||||||
|
|
||||||
|
bool m_two_pass_flag;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Writes JPEG image to a file.
|
||||||
|
// num_channels must be 1 (Y) or 3 (RGB), image pitch must be width*num_channels.
|
||||||
|
bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());
|
||||||
|
|
||||||
|
// Writes JPEG image to memory buffer.
|
||||||
|
// On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes.
|
||||||
|
// If return value is true, buf_size will be set to the size of the compressed data.
|
||||||
|
bool compress_image_to_jpeg_file_in_memory(void *pBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());
|
||||||
|
|
||||||
|
// Output stream abstract class - used by the jpeg_encoder class to write to the output stream.
|
||||||
|
// put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts.
|
||||||
|
class output_stream
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
virtual ~output_stream() { };
|
||||||
|
virtual bool put_buf(const void* Pbuf, int64_t len) = 0;
|
||||||
|
template<class T> inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); }
|
||||||
|
};
|
||||||
|
|
||||||
|
// Lower level jpeg_encoder class - useful if more control is needed than the above helper functions.
|
||||||
|
class jpeg_encoder
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
jpeg_encoder();
|
||||||
|
~jpeg_encoder();
|
||||||
|
|
||||||
|
// Initializes the compressor.
|
||||||
|
// pStream: The stream object to use for writing compressed data.
|
||||||
|
// params - Compression parameters structure, defined above.
|
||||||
|
// width, height - Image dimensions.
|
||||||
|
// channels - May be 1, or 3. 1 indicates grayscale, 3 indicates RGB source data.
|
||||||
|
// Returns false on out of memory or if a stream write fails.
|
||||||
|
bool init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params = params());
|
||||||
|
|
||||||
|
const params &get_params() const { return m_params; }
|
||||||
|
|
||||||
|
// Deinitializes the compressor, freeing any allocated memory. May be called at any time.
|
||||||
|
void deinit();
|
||||||
|
|
||||||
|
uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; }
|
||||||
|
inline uint get_cur_pass() { return m_pass_num; }
|
||||||
|
|
||||||
|
// Call this method with each source scanline.
|
||||||
|
// width * src_channels bytes per scanline is expected (RGB or Y format).
|
||||||
|
// You must call with NULL after all scanlines are processed to finish compression.
|
||||||
|
// Returns false on out of memory or if a stream write fails.
|
||||||
|
bool process_scanline(const void* pScanline);
|
||||||
|
|
||||||
|
private:
|
||||||
|
jpeg_encoder(const jpeg_encoder &);
|
||||||
|
jpeg_encoder &operator =(const jpeg_encoder &);
|
||||||
|
|
||||||
|
typedef int32 sample_array_t;
|
||||||
|
|
||||||
|
output_stream *m_pStream;
|
||||||
|
params m_params;
|
||||||
|
uint8 m_num_components;
|
||||||
|
uint8 m_comp_h_samp[3], m_comp_v_samp[3];
|
||||||
|
int m_image_x, m_image_y, m_image_bpp, m_image_bpl;
|
||||||
|
int m_image_x_mcu, m_image_y_mcu;
|
||||||
|
int m_image_bpl_xlt, m_image_bpl_mcu;
|
||||||
|
int m_mcus_per_row;
|
||||||
|
int m_mcu_x, m_mcu_y;
|
||||||
|
uint8 *m_mcu_lines[16];
|
||||||
|
uint8 m_mcu_y_ofs;
|
||||||
|
sample_array_t m_sample_array[64];
|
||||||
|
int16 m_coefficient_array[64];
|
||||||
|
int32 m_quantization_tables[2][64];
|
||||||
|
uint m_huff_codes[4][256];
|
||||||
|
uint8 m_huff_code_sizes[4][256];
|
||||||
|
uint8 m_huff_bits[4][17];
|
||||||
|
uint8 m_huff_val[4][256];
|
||||||
|
uint32 m_huff_count[4][256];
|
||||||
|
int m_last_dc_val[3];
|
||||||
|
enum { JPGE_OUT_BUF_SIZE = 2048 };
|
||||||
|
uint8 m_out_buf[JPGE_OUT_BUF_SIZE];
|
||||||
|
uint8 *m_pOut_buf;
|
||||||
|
uint m_out_buf_left;
|
||||||
|
uint32 m_bit_buffer;
|
||||||
|
uint m_bits_in;
|
||||||
|
uint8 m_pass_num;
|
||||||
|
bool m_all_stream_writes_succeeded;
|
||||||
|
|
||||||
|
void optimize_huffman_table(int table_num, int table_len);
|
||||||
|
void emit_byte(uint8 i);
|
||||||
|
void emit_word(uint i);
|
||||||
|
void emit_marker(int marker);
|
||||||
|
void emit_jfif_app0();
|
||||||
|
void emit_dqt();
|
||||||
|
void emit_sof();
|
||||||
|
void emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag);
|
||||||
|
void emit_dhts();
|
||||||
|
void emit_sos();
|
||||||
|
void emit_markers();
|
||||||
|
void compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val);
|
||||||
|
void compute_quant_table(int32 *dst, int16 *src);
|
||||||
|
void adjust_quant_table(int32 *dst, int32 *src);
|
||||||
|
void first_pass_init();
|
||||||
|
bool second_pass_init();
|
||||||
|
bool jpg_open(int p_x_res, int p_y_res, int src_channels);
|
||||||
|
void load_block_8_8_grey(int x);
|
||||||
|
void load_block_8_8(int x, int y, int c);
|
||||||
|
void load_block_16_8(int x, int c);
|
||||||
|
void load_block_16_8_8(int x, int c);
|
||||||
|
void load_quantized_coefficients(int component_num);
|
||||||
|
void flush_output_buffer();
|
||||||
|
void put_bits(uint bits, uint len);
|
||||||
|
void code_coefficients_pass_one(int component_num);
|
||||||
|
void code_coefficients_pass_two(int component_num);
|
||||||
|
void code_block(int component_num);
|
||||||
|
void process_mcu_row();
|
||||||
|
bool terminate_pass_one();
|
||||||
|
bool terminate_pass_two();
|
||||||
|
bool process_end_of_image();
|
||||||
|
void load_mcu(const void* src);
|
||||||
|
void clear();
|
||||||
|
void init();
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace jpge
|
||||||
|
|
||||||
|
#endif // JPEG_ENCODER
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
jpge.h - C++ class for JPEG compression.
|
||||||
|
Public domain, Rich Geldreich <richgel99@gmail.com>
|
||||||
|
Alex Evans: Added RGBA support, linear memory allocator.
|
||||||
文件差异内容过多而无法显示
加载差异
文件差异内容过多而无法显示
加载差异
@@ -0,0 +1,433 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
|
#include <utility>
|
||||||
|
#include <cstring>
|
||||||
|
#include <type_traits>
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
#include "libipc/def.h"
|
||||||
|
|
||||||
|
#include "libipc/platform/detail.h"
|
||||||
|
#include "libipc/circ/elem_def.h"
|
||||||
|
#include "libipc/utility/log.h"
|
||||||
|
#include "libipc/utility/utility.h"
|
||||||
|
|
||||||
|
namespace ipc {
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////
|
||||||
|
/// producer-consumer implementation
|
||||||
|
////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
template <typename Flag>
|
||||||
|
struct prod_cons_impl;
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct elem_t {
|
||||||
|
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||||
|
};
|
||||||
|
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
|
||||||
|
|
||||||
|
constexpr circ::u2_t cursor() const noexcept {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool push(W* /*wrapper*/, F&& f, E* elems) {
|
||||||
|
auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
|
||||||
|
if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
|
||||||
|
return false; // full
|
||||||
|
}
|
||||||
|
std::forward<F>(f)(&(elems[cur_wt].data_));
|
||||||
|
wt_.fetch_add(1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
|
||||||
|
* So we could just disconnect all connections of receiver, and return false.
|
||||||
|
*/
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&&, E*) {
|
||||||
|
wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R, typename E>
|
||||||
|
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
|
||||||
|
auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
|
||||||
|
if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
|
||||||
|
return false; // empty
|
||||||
|
}
|
||||||
|
std::forward<F>(f)(&(elems[cur_rd].data_));
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
rd_.fetch_add(1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
|
||||||
|
: prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&&, E*) {
|
||||||
|
wrapper->elems()->disconnect_receiver(1);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R,
|
||||||
|
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
|
||||||
|
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
|
||||||
|
byte_t buff[DS];
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
auto cur_rd = rd_.load(std::memory_order_relaxed);
|
||||||
|
if (circ::index_of(cur_rd) ==
|
||||||
|
circ::index_of(wt_.load(std::memory_order_acquire))) {
|
||||||
|
return false; // empty
|
||||||
|
}
|
||||||
|
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
|
||||||
|
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
|
||||||
|
std::forward<F>(f)(buff);
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
|
||||||
|
: prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {
|
||||||
|
|
||||||
|
using flag_t = std::uint64_t;
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct elem_t {
|
||||||
|
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||||
|
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
|
||||||
|
};
|
||||||
|
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool push(W* /*wrapper*/, F&& f, E* elems) {
|
||||||
|
circ::u2_t cur_ct, nxt_ct;
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
cur_ct = ct_.load(std::memory_order_relaxed);
|
||||||
|
if (circ::index_of(nxt_ct = cur_ct + 1) ==
|
||||||
|
circ::index_of(rd_.load(std::memory_order_acquire))) {
|
||||||
|
return false; // full
|
||||||
|
}
|
||||||
|
if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
auto* el = elems + circ::index_of(cur_ct);
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
// set flag & try update wt
|
||||||
|
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
||||||
|
while (1) {
|
||||||
|
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
|
||||||
|
if (cur_ct != wt_.load(std::memory_order_relaxed)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if ((~cac_ct) != cur_ct) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
wt_.store(nxt_ct, std::memory_order_release);
|
||||||
|
cur_ct = nxt_ct;
|
||||||
|
nxt_ct = cur_ct + 1;
|
||||||
|
el = elems + circ::index_of(cur_ct);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&&, E*) {
|
||||||
|
wrapper->elems()->disconnect_receiver(1);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R,
|
||||||
|
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
|
||||||
|
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
|
||||||
|
byte_t buff[DS];
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
auto cur_rd = rd_.load(std::memory_order_relaxed);
|
||||||
|
auto cur_wt = wt_.load(std::memory_order_acquire);
|
||||||
|
auto id_rd = circ::index_of(cur_rd);
|
||||||
|
auto id_wt = circ::index_of(cur_wt);
|
||||||
|
if (id_rd == id_wt) {
|
||||||
|
auto* el = elems + id_wt;
|
||||||
|
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
|
||||||
|
if ((~cac_ct) != cur_wt) {
|
||||||
|
return false; // empty
|
||||||
|
}
|
||||||
|
if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
|
||||||
|
wt_.store(cur_wt + 1, std::memory_order_release);
|
||||||
|
}
|
||||||
|
k = 0;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
|
||||||
|
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
|
||||||
|
std::forward<F>(f)(buff);
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {
|
||||||
|
|
||||||
|
using rc_t = std::uint64_t;
|
||||||
|
|
||||||
|
enum : rc_t {
|
||||||
|
ep_mask = 0x00000000ffffffffull,
|
||||||
|
ep_incr = 0x0000000100000000ull
|
||||||
|
};
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct elem_t {
|
||||||
|
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||||
|
std::atomic<rc_t> rc_ { 0 }; // read-counter
|
||||||
|
};
|
||||||
|
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
|
||||||
|
alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer
|
||||||
|
|
||||||
|
circ::u2_t cursor() const noexcept {
|
||||||
|
return wt_.load(std::memory_order_acquire);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool push(W* wrapper, F&& f, E* elems) {
|
||||||
|
E* el;
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
|
||||||
|
// check all consumers have finished reading this element
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
circ::cc_t rem_cc = cur_rc & ep_mask;
|
||||||
|
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
|
||||||
|
return false; // has not finished yet
|
||||||
|
}
|
||||||
|
// consider rem_cc to be 0 here
|
||||||
|
if (el->rc_.compare_exchange_weak(
|
||||||
|
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
wt_.fetch_add(1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&& f, E* elems) {
|
||||||
|
E* el;
|
||||||
|
epoch_ += ep_incr;
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
|
||||||
|
// check all consumers have finished reading this element
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
circ::cc_t rem_cc = cur_rc & ep_mask;
|
||||||
|
if (cc & rem_cc) {
|
||||||
|
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
|
||||||
|
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
}
|
||||||
|
// just compare & exchange
|
||||||
|
if (el->rc_.compare_exchange_weak(
|
||||||
|
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
wt_.fetch_add(1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R, typename E>
|
||||||
|
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
|
||||||
|
if (cur == cursor()) return false; // acquire
|
||||||
|
auto* el = elems + circ::index_of(cur++);
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
if ((cur_rc & ep_mask) == 0) {
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
|
||||||
|
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
|
||||||
|
std::forward<R>(out)((nxt_rc & ep_mask) == 0);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {
|
||||||
|
|
||||||
|
using rc_t = std::uint64_t;
|
||||||
|
using flag_t = std::uint64_t;
|
||||||
|
|
||||||
|
enum : rc_t {
|
||||||
|
rc_mask = 0x00000000ffffffffull,
|
||||||
|
ep_mask = 0x00ffffffffffffffull,
|
||||||
|
ep_incr = 0x0100000000000000ull,
|
||||||
|
ic_mask = 0xff000000ffffffffull,
|
||||||
|
ic_incr = 0x0000000100000000ull
|
||||||
|
};
|
||||||
|
|
||||||
|
template <std::size_t DataSize, std::size_t AlignSize>
|
||||||
|
struct elem_t {
|
||||||
|
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
||||||
|
std::atomic<rc_t > rc_ { 0 }; // read-counter
|
||||||
|
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
|
||||||
|
};
|
||||||
|
|
||||||
|
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
|
||||||
|
alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };
|
||||||
|
|
||||||
|
circ::u2_t cursor() const noexcept {
|
||||||
|
return ct_.load(std::memory_order_acquire);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr static rc_t inc_rc(rc_t rc) noexcept {
|
||||||
|
return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr static rc_t inc_mask(rc_t rc) noexcept {
|
||||||
|
return inc_rc(rc) & ~rc_mask;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool push(W* wrapper, F&& f, E* elems) {
|
||||||
|
E* el;
|
||||||
|
circ::u2_t cur_ct;
|
||||||
|
rc_t epoch = epoch_.load(std::memory_order_acquire);
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
|
||||||
|
// check all consumers have finished reading this element
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_relaxed);
|
||||||
|
circ::cc_t rem_cc = cur_rc & rc_mask;
|
||||||
|
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
|
||||||
|
return false; // has not finished yet
|
||||||
|
}
|
||||||
|
else if (!rem_cc) {
|
||||||
|
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
|
||||||
|
if ((cur_fl != cur_ct) && cur_fl) {
|
||||||
|
return false; // full
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// consider rem_cc to be 0 here
|
||||||
|
if (el->rc_.compare_exchange_weak(
|
||||||
|
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
|
||||||
|
epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
// only one thread/process would touch here at one time
|
||||||
|
ct_.store(cur_ct + 1, std::memory_order_release);
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
// set flag & try update wt
|
||||||
|
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename E>
|
||||||
|
bool force_push(W* wrapper, F&& f, E* elems) {
|
||||||
|
E* el;
|
||||||
|
circ::u2_t cur_ct;
|
||||||
|
rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
|
||||||
|
// check all consumers have finished reading this element
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
circ::cc_t rem_cc = cur_rc & rc_mask;
|
||||||
|
if (cc & rem_cc) {
|
||||||
|
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
|
||||||
|
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
|
||||||
|
if (cc == 0) return false; // no reader
|
||||||
|
}
|
||||||
|
// just compare & exchange
|
||||||
|
if (el->rc_.compare_exchange_weak(
|
||||||
|
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
|
||||||
|
if (epoch == epoch_.load(std::memory_order_acquire)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
else if (push(wrapper, std::forward<F>(f), elems)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
// only one thread/process would touch here at one time
|
||||||
|
ct_.store(cur_ct + 1, std::memory_order_release);
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
// set flag & try update wt
|
||||||
|
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename W, typename F, typename R, typename E, std::size_t N>
|
||||||
|
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
|
||||||
|
auto* el = elems + circ::index_of(cur);
|
||||||
|
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
|
||||||
|
if (cur_fl != ~static_cast<flag_t>(cur)) {
|
||||||
|
return false; // empty
|
||||||
|
}
|
||||||
|
++cur;
|
||||||
|
std::forward<F>(f)(&(el->data_));
|
||||||
|
for (unsigned k = 0;;) {
|
||||||
|
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
||||||
|
if ((cur_rc & rc_mask) == 0) {
|
||||||
|
std::forward<R>(out)(true);
|
||||||
|
el->f_ct_.store(cur + N - 1, std::memory_order_release);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
|
||||||
|
bool last_one = false;
|
||||||
|
if ((last_one = (nxt_rc & rc_mask) == 0)) {
|
||||||
|
el->f_ct_.store(cur + N - 1, std::memory_order_release);
|
||||||
|
}
|
||||||
|
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
|
||||||
|
std::forward<R>(out)(last_one);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
ipc::yield(k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace ipc
|
||||||
@@ -0,0 +1,58 @@
|
|||||||
|
The goal of reducing sequential computation also forms the foundation of the Extended Neural GPU \citep{extendedngpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as basic building block, computing hidden representations in parallel for all input and output positions. In these models, the number of operations required to relate signals from two arbitrary input or output positions grows in the distance between positions, linearly for ConvS2S and logarithmically for ByteNet. This makes it more difficult to learn dependencies between distant positions \citep{hochreiter2001gradient}. In the Transformer this is reduced to a constant number of operations, albeit at the cost of reduced effective resolution due to averaging attention-weighted positions, an effect we counteract with Multi-Head Attention as described in section~\ref{sec:attention}.
|
||||||
|
|
||||||
|
Self-attention, sometimes called intra-attention is an attention mechanism relating different positions of a single sequence in order to compute a representation of the sequence. Self-attention has been used successfully in a variety of tasks including reading comprehension, abstractive summarization, textual entailment and learning task-independent sentence representations \citep{cheng2016long, decomposableAttnModel, paulus2017deep, lin2017structured}.
|
||||||
|
|
||||||
|
End-to-end memory networks are based on a recurrent attention mechanism instead of sequence-aligned recurrence and have been shown to perform well on simple-language question answering and language modeling tasks \citep{sukhbaatar2015}.
|
||||||
|
|
||||||
|
To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution.
|
||||||
|
In the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as \citep{neural_gpu, NalBytenet2017} and \citep{JonasFaceNet2017}.
|
||||||
|
|
||||||
|
|
||||||
|
%\citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs.
|
||||||
|
|
||||||
|
%For example,! in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at low computation cost, making it an essential ingredient in competitive recurrent models for machine translation.
|
||||||
|
|
||||||
|
%A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.
|
||||||
|
|
||||||
|
%After the seminal models introduced in \citep{sutskever14, bahdanau2014neural, cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation (MT) and language modeling with recurrent endoder-decoder and recurrent language models. Recent effort \citep{shazeer2017outrageously} has successfully combined the power of conditional computation with sequence models to train very large models for MT, pushing SOTA at lower computational cost.
|
||||||
|
|
||||||
|
%Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state precludes processing all timesteps at once, instead requiring long sequences of sequential operations. In practice, this results in greatly reduced computational efficiency, as on modern computing hardware, a single operation on a large batch is much faster than a large number of operations on small batches. The problem gets worse at longer sequence lengths. Although sequential computation is not a severe bottleneck at inference time, as autoregressively generating each output requires all previous outputs, the inability to compute scores at all output positions at once hinders us from rapidly training our models over large datasets. Although impressive work such as \citep{Kuchaiev2017Factorization} is able to significantly accelerate the training of LSTMs with factorization tricks, we are still bound by the linear dependence on sequence length.
|
||||||
|
|
||||||
|
%If the model could compute hidden states at each time step using only the inputs and outputs, it would be liberated from the dependence on results from previous time steps during training. This line of thought is the foundation of recent efforts such as the Markovian neural GPU \citep{neural_gpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as a building block to compute hidden representations simultaneously for all timesteps, resulting in $O(1)$ sequential time complexity. \citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs.
|
||||||
|
|
||||||
|
%A crucial component for accurate sequence prediction is modeling cross-positional communication. For example, in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at a low computation cost, also $O(1)$ sequential time complexity, making it an essential ingredient in recurrent encoder-decoder architectures for MT. A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
%Note: Facebook model is no better than RNNs in this regard, since it requires a number of layers proportional to the distance you want to communicate. Bytenet is more promising, since it requires a logarithmnic number of layers (does bytenet have SOTA results)?
|
||||||
|
|
||||||
|
%Note: An attention layer can connect a very large number of positions at a low computation cost in O(1) sequential operations. This is why encoder-decoder attention has been so successful in seq-to-seq models so far. It is only natural, then, to also use attention to connect the timesteps of the same sequence.
|
||||||
|
|
||||||
|
%Note: I wouldn't say that long sequences are not a problem during inference. It would be great if we could infer with no long sequences. We could just say later on that, while our training graph is constant-depth, our model still requires sequential operations in the decoder part during inference due to the autoregressive nature of the model.
|
||||||
|
|
||||||
|
%\begin{table}[h!]
|
||||||
|
%\caption{Attention models are quite efficient for cross-positional communications when sequence length is smaller than channel depth. $n$ represents the sequence length and $d$ represents the channel depth.}
|
||||||
|
%\label{tab:op_complexities}
|
||||||
|
%\begin{center}
|
||||||
|
%\vspace{-5pt}
|
||||||
|
%\scalebox{0.75}{
|
||||||
|
|
||||||
|
%\begin{tabular}{l|c|c|c}
|
||||||
|
%\hline \hline
|
||||||
|
%Layer Type & Receptive & Complexity & Sequential \\
|
||||||
|
% & Field & & Operations \\
|
||||||
|
%\hline
|
||||||
|
%Pointwise Feed-Forward & $1$ & $O(n \cdot d^2)$ & $O(1)$ \\
|
||||||
|
%\hline
|
||||||
|
%Recurrent & $n$ & $O(n \cdot d^2)$ & $O(n)$ \\
|
||||||
|
%\hline
|
||||||
|
%Convolutional & $r$ & $O(r \cdot n \cdot d^2)$ & $O(1)$ \\
|
||||||
|
%\hline
|
||||||
|
%Convolutional (separable) & $r$ & $O(r \cdot n \cdot d + n %\cdot d^2)$ & $O(1)$ \\
|
||||||
|
%\hline
|
||||||
|
%Attention & $r$ & $O(r \cdot n \cdot d)$ & $O(1)$ \\
|
||||||
|
%\hline \hline
|
||||||
|
%\end{tabular}
|
||||||
|
%}
|
||||||
|
%\end{center}
|
||||||
|
%\end{table}
|
||||||
@@ -0,0 +1,18 @@
|
|||||||
|
Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.
|
||||||
|
|
||||||
|
Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
|
||||||
|
%\marginpar{not sure if the memory constraints are understandable here}
|
||||||
|
Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains.
|
||||||
|
|
||||||
|
%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}
|
||||||
|
|
||||||
|
Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.
|
||||||
|
|
||||||
|
%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
|
||||||
|
%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}
|
||||||
|
|
||||||
|
In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
|
||||||
|
%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}
|
||||||
|
|
||||||
|
% Just a standard paragraph with citations, rewrite.
|
||||||
|
%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state encumbers recurrnet models to process multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are the of the scale of the web. What's the largest dataset we have ? . Talk about Nividia and possibly other's effors to speed up things, and possibly other efforts that alleviate this, but are still limited by it's comptuational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term meory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do.
|
||||||
@@ -0,0 +1,155 @@
|
|||||||
|
|
||||||
|
\begin{figure}
|
||||||
|
\centering
|
||||||
|
\includegraphics[scale=0.6]{Figures/ModalNet-21}
|
||||||
|
\caption{The Transformer - model architecture.}
|
||||||
|
\label{fig:model-arch}
|
||||||
|
\end{figure}
|
||||||
|
|
||||||
|
% Although the primary workhorse of our model is attention,
|
||||||
|
%Our model maintains the encoder-decoder structure that is common to many so-called sequence-to-sequence models \citep{bahdanau2014neural,sutskever14}. As in all such architectures, the encoder computes a representation of the input sequence, and the decoder consumes these representations along with the output tokens to autoregressively produce the output sequence. Where, traditionally, the encoder and decoder contain stacks of recurrent or convolutional layers, our encoder and decoder stacks are composed of attention layers and position-wise feed-forward layers (Figure~\ref{fig:model-arch}). The following sections describe the gross architecture and these particular components in detail.
|
||||||
|
|
||||||
|
Most competitive neural sequence transduction models have an encoder-decoder structure \citep{cho2014learning,bahdanau2014neural,sutskever14}. Here, the encoder maps an input sequence of symbol representations $(x_1, ..., x_n)$ to a sequence of continuous representations $\mathbf{z} = (z_1, ..., z_n)$. Given $\mathbf{z}$, the decoder then generates an output sequence $(y_1,...,y_m)$ of symbols one element at a time. At each step the model is auto-regressive \citep{graves2013generating}, consuming the previously generated symbols as additional input when generating the next.
|
||||||
|
|
||||||
|
The Transformer follows this overall architecture using stacked self-attention and point-wise, fully connected layers for both the encoder and decoder, shown in the left and right halves of Figure~\ref{fig:model-arch}, respectively.
|
||||||
|
|
||||||
|
\subsection{Encoder and Decoder Stacks}
|
||||||
|
|
||||||
|
\paragraph{Encoder:}The encoder is composed of a stack of $N=6$ identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position-wise fully connected feed-forward network. We employ a residual connection \citep{he2016deep} around each of the two sub-layers, followed by layer normalization \cite{layernorm2016}. That is, the output of each sub-layer is $\mathrm{LayerNorm}(x + \mathrm{Sublayer}(x))$, where $\mathrm{Sublayer}(x)$ is the function implemented by the sub-layer itself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding layers, produce outputs of dimension $\dmodel=512$.
|
||||||
|
|
||||||
|
\paragraph{Decoder:}The decoder is also composed of a stack of $N=6$ identical layers. In addition to the two sub-layers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head attention over the output of the encoder stack. Similar to the encoder, we employ residual connections around each of the sub-layers, followed by layer normalization. We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. This masking, combined with fact that the output embeddings are offset by one position, ensures that the predictions for position $i$ can depend only on the known outputs at positions less than $i$.
|
||||||
|
|
||||||
|
% In our model (Figure~\ref{fig:model-arch}), the encoder and decoder are composed of stacks of alternating self-attention layers (for cross-positional communication) and position-wise feed-forward layers (for in-place computation). In addition, the decoder stack contains encoder-decoder attention layers. Since attention is agnostic to the distances between words, our model requires a "positional encoding" to be added to the encoder and decoder input. The following sections describe all of these components in detail.
|
||||||
|
|
||||||
|
\subsection{Attention} \label{sec:attention}
|
||||||
|
An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is computed as a weighted sum of the values, where the weight assigned to each value is computed by a compatibility function of the query with the corresponding key.
|
||||||
|
|
||||||
|
\subsubsection{Scaled Dot-Product Attention} \label{sec:scaled-dot-prod}
|
||||||
|
|
||||||
|
% \begin{figure}
|
||||||
|
% \centering
|
||||||
|
% \includegraphics[scale=0.6]{Figures/ModalNet-19}
|
||||||
|
% \caption{Scaled Dot-Product Attention.}
|
||||||
|
% \label{fig:multi-head-att}
|
||||||
|
% \end{figure}
|
||||||
|
|
||||||
|
We call our particular attention "Scaled Dot-Product Attention" (Figure~\ref{fig:multi-head-att}). The input consists of queries and keys of dimension $d_k$, and values of dimension $d_v$. We compute the dot products of the query with all keys, divide each by $\sqrt{d_k}$, and apply a softmax function to obtain the weights on the values.
|
||||||
|
|
||||||
|
In practice, we compute the attention function on a set of queries simultaneously, packed together into a matrix $Q$. The keys and values are also packed together into matrices $K$ and $V$. We compute the matrix of outputs as:
|
||||||
|
|
||||||
|
\begin{equation}
|
||||||
|
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V
|
||||||
|
\end{equation}
|
||||||
|
|
||||||
|
The two most commonly used attention functions are additive attention \citep{bahdanau2014neural}, and dot-product (multiplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor of $\frac{1}{\sqrt{d_k}}$. Additive attention computes the compatibility function using a feed-forward network with a single hidden layer. While the two are similar in theoretical complexity, dot-product attention is much faster and more space-efficient in practice, since it can be implemented using highly optimized matrix multiplication code.
|
||||||
|
|
||||||
|
%We scale the dot products by $1/\sqrt{d_k}$ to limit the magnitude of the dot products, which works well in practice. Otherwise, we found applying the softmax to often result in weights very close to 0 or 1, and hence minuscule gradients.
|
||||||
|
|
||||||
|
% Already described in the subsequent section
|
||||||
|
%When used as part of decoder self-attention, an optional mask function is applied just before the softmax to prevent positions from attending to subsequent positions. This mask simply sets the logits corresponding to all illegal connections (those outside of the lower triangle) to $-\infty$.
|
||||||
|
|
||||||
|
%\paragraph{Comparison to Additive Attention: } We choose dot product attention over additive attention \citep{bahdanau2014neural} since it can be computed using highly optimized matrix multiplication code. This optimization is particularly important to us, as we employ many attention layers in our model.
|
||||||
|
|
||||||
|
While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms dot product attention without scaling for larger values of $d_k$ \citep{DBLP:journals/corr/BritzGLL17}. We suspect that for large values of $d_k$, the dot products grow large in magnitude, pushing the softmax function into regions where it has extremely small gradients \footnote{To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random variables with mean $0$ and variance $1$. Then their dot product, $q \cdot k = \sum_{i=1}^{d_k} q_ik_i$, has mean $0$ and variance $d_k$.}. To counteract this effect, we scale the dot products by $\frac{1}{\sqrt{d_k}}$.
|
||||||
|
|
||||||
|
|
||||||
|
%We suspect this to be caused by the dot products growing too large in magnitude to result in useful gradients after applying the softmax function. To counteract this, we scale the dot product by $1/\sqrt{d_k}$.
|
||||||
|
|
||||||
|
|
||||||
|
\subsubsection{Multi-Head Attention} \label{sec:multihead}
|
||||||
|
|
||||||
|
\begin{figure}
|
||||||
|
\begin{minipage}[t]{0.5\textwidth}
|
||||||
|
\centering
|
||||||
|
Scaled Dot-Product Attention \\
|
||||||
|
\vspace{0.5cm}
|
||||||
|
\includegraphics[scale=0.6]{Figures/ModalNet-19}
|
||||||
|
\end{minipage}
|
||||||
|
\begin{minipage}[t]{0.5\textwidth}
|
||||||
|
\centering
|
||||||
|
Multi-Head Attention \\
|
||||||
|
\vspace{0.1cm}
|
||||||
|
\includegraphics[scale=0.6]{Figures/ModalNet-20}
|
||||||
|
\end{minipage}
|
||||||
|
|
||||||
|
|
||||||
|
% \centering
|
||||||
|
|
||||||
|
\caption{(left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several attention layers running in parallel.}
|
||||||
|
\label{fig:multi-head-att}
|
||||||
|
\end{figure}

Instead of performing a single attention function with $\dmodel$-dimensional keys, values and queries, we found it beneficial to linearly project the queries, keys and values $h$ times with different, learned linear projections to $d_k$, $d_k$ and $d_v$ dimensions, respectively.
On each of these projected versions of queries, keys and values we then perform the attention function in parallel, yielding $d_v$-dimensional output values. These are concatenated and once again projected, resulting in the final values, as depicted in Figure~\ref{fig:multi-head-att}.

Multi-head attention allows the model to jointly attend to information from different representation subspaces at different positions. With a single attention head, averaging inhibits this.

\begin{align*}
    \mathrm{MultiHead}(Q, K, V) &= \mathrm{Concat}(\mathrm{head_1}, ..., \mathrm{head_h})W^O\\
%   \mathrm{where} \mathrm{head_i} &= \mathrm{Attention}(QW_Q_i^{\dmodel \times d_q}, KW_K_i^{\dmodel \times d_k}, VW^V_i^{\dmodel \times d_v})\\
    \text{where}~\mathrm{head_i} &= \mathrm{Attention}(QW^Q_i, KW^K_i, VW^V_i)\\
\end{align*}

Where the projections are parameter matrices $W^Q_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^K_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^V_i \in \mathbb{R}^{\dmodel \times d_v}$ and $W^O \in \mathbb{R}^{hd_v \times \dmodel}$.

%find it better (and no more expensive) to have multiple parallel attention layers (each over the full set of positions) with proportionally lower-dimensional keys, values and queries. We call this "Multi-Head Attention" (Figure~\ref{fig:multi-head-att}). The keys, values, and queries for each of these parallel attention layers are computed by learned linear transformations of the inputs to the multi-head attention. We use different linear transformations across different parallel attention layers. The output of the parallel attention layers are concatenated, and then passed through a final learned linear transformation.

In this work we employ $h=8$ parallel attention layers, or heads. For each of these we use $d_k=d_v=\dmodel/h=64$.
Due to the reduced dimension of each head, the total computational cost is similar to that of single-head attention with full dimensionality.
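For concreteness, a minimal sketch of the multi-head computation, reusing the `scaled_dot_product_attention` sketch above; the per-head projection matrices here are randomly initialized placeholders standing in for trained parameters:

```python
import numpy as np

d_model, h = 512, 8
d_k = d_v = d_model // h          # 64, as in the text

rng = np.random.default_rng(0)
W_Q = [rng.normal(size=(d_model, d_k)) for _ in range(h)]   # W^Q_i
W_K = [rng.normal(size=(d_model, d_k)) for _ in range(h)]   # W^K_i
W_V = [rng.normal(size=(d_model, d_v)) for _ in range(h)]   # W^V_i
W_O = rng.normal(size=(h * d_v, d_model))                   # W^O

def multi_head_attention(Q, K, V):
    # project, attend independently per head, concatenate, project back to d_model
    heads = [scaled_dot_product_attention(Q @ W_Q[i], K @ W_K[i], V @ W_V[i])
             for i in range(h)]
    return np.concatenate(heads, axis=-1) @ W_O
```

For self-attention one would call `multi_head_attention(x, x, x)` with `x` of shape `(seq_len, d_model)`, which returns an output of the same shape.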

\subsubsection{Applications of Attention in our Model}

The Transformer uses multi-head attention in three different ways:
\begin{itemize}
 \item In "encoder-decoder attention" layers, the queries come from the previous decoder layer, and the memory keys and values come from the output of the encoder. This allows every position in the decoder to attend over all positions in the input sequence. This mimics the typical encoder-decoder attention mechanisms in sequence-to-sequence models such as \citep{wu2016google, bahdanau2014neural,JonasFaceNet2017}.

 \item The encoder contains self-attention layers. In a self-attention layer all of the keys, values and queries come from the same place, in this case, the output of the previous layer in the encoder. Each position in the encoder can attend to all positions in the previous layer of the encoder.

 \item Similarly, self-attention layers in the decoder allow each position in the decoder to attend to all positions in the decoder up to and including that position. We need to prevent leftward information flow in the decoder to preserve the auto-regressive property. We implement this inside of scaled dot-product attention by masking out (setting to $-\infty$) all values in the input of the softmax which correspond to illegal connections. See Figure~\ref{fig:multi-head-att}.
\end{itemize}

\subsection{Position-wise Feed-Forward Networks}\label{sec:ffn}

In addition to attention sub-layers, each of the layers in our encoder and decoder contains a fully connected feed-forward network, which is applied to each position separately and identically. This consists of two linear transformations with a ReLU activation in between.

\begin{equation}
   \mathrm{FFN}(x)=\max(0, xW_1 + b_1) W_2 + b_2
\end{equation}

While the linear transformations are the same across different positions, they use different parameters from layer to layer. Another way of describing this is as two convolutions with kernel size 1. The dimensionality of input and output is $\dmodel=512$, and the inner-layer has dimensionality $d_{ff}=2048$.
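A small sketch of the position-wise network under the stated sizes ($\dmodel=512$, $d_{ff}=2048$); the weight matrices are random placeholders, and applying the same matrices at every row of `x` is exactly the "applied to each position separately and identically" property:

```python
import numpy as np

d_model, d_ff = 512, 2048
rng = np.random.default_rng(0)
W1, b1 = rng.normal(size=(d_model, d_ff)) * 0.02, np.zeros(d_ff)
W2, b2 = rng.normal(size=(d_ff, d_model)) * 0.02, np.zeros(d_model)

def ffn(x):
    # x: (seq_len, d_model); every position (row) is transformed identically
    return np.maximum(0.0, x @ W1 + b1) @ W2 + b2
```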

%In the appendix, we describe how the position-wise feed-forward network can also be seen as a form of attention.

%from Jakob: The number of operations required for the model to relate signals from two arbitrary input or output positions grows in the distance between positions in input or output, linearly for ConvS2S and logarithmically for ByteNet, making it harder to learn dependencies between these positions \citep{hochreiter2001gradient}. In the transformer this is reduced to a constant number of operations, albeit at the cost of effective resolution caused by averaging attention-weighted positions, an effect we aim to counteract with multi-headed attention.

%Figure~\ref{fig:simple-att} presents a simple attention function, $A$, with a single head, that forms the basis of our multi-head attention. $A$ takes a query key vector $\kq$, matrices of memory keys $\km$ and memory values $\vm$ ,and produces a query value vector $\vq$ as
%\begin{equation*} \label{eq:attention}
% A(\kq, \km, \vm) = {\vm}^T (Softmax(\km \kq).
%\end{equation*}
%We linearly transform $\kq,\,\km$, and $\vm$ with learned matrices ${\Wkq \text{,} \, \Wkm}$, and ${\Wvm}$ before calling the attention function, and transform the output query with $\Wvq$ before handing it to the feed forward layer. Each attention layer has it's own set of transformation matrices, which are shared across all query positions. $A$ is applied in parallel for each query position, and is implemented very efficiently as a batch of matrix multiplies. The self-attention and encoder-decoder attention layers use $A$, but with different arguments. For example, in encdoder self-attention, queries in encoder layer $i$ attention to memories in encoder layer $i-1$. To ensure that decoder self-attention layers do not look at future words, we add $- \inf$ to the softmax logits in positions $j+1$ to query length for query position $l$.

%In simple attention, the query value is a weighted combination of the memory values where the attention weights sum to one. Although this function performs well in practice, the constraint on attention weights can restrict the amount of information that flows from memories to queries because the query cannot focus on multiple memory positions at once, which might be desirable when translating long sequences. \marginpar{@usz, could you think of an example of this ?} We remedy this by maintaining multiple attention heads at each query position that attend to all memory positions in parallel, with a different set of parameters per attention head $h$.
%\marginpar{}

\subsection{Embeddings and Softmax}
Similarly to other sequence transduction models, we use learned embeddings to convert the input tokens and output tokens to vectors of dimension $\dmodel$. We also use the usual learned linear transformation and softmax function to convert the decoder output to predicted next-token probabilities. In our model, we share the same weight matrix between the two embedding layers and the pre-softmax linear transformation, similar to \citep{press2016using}. In the embedding layers, we multiply those weights by $\sqrt{\dmodel}$.
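One way to read the weight sharing is as a single matrix used three times: as the source embedding, as the target embedding, and (transposed) as the pre-softmax projection. A minimal sketch of this reading, with a placeholder vocabulary size, not the reference implementation:

```python
import numpy as np

vocab_size, d_model = 32000, 512          # vocab_size is an illustrative placeholder
rng = np.random.default_rng(0)
E = rng.normal(size=(vocab_size, d_model)) * 0.02   # shared embedding matrix

def embed(token_ids):
    # embedding lookup, scaled by sqrt(d_model) as described in the text
    return E[token_ids] * np.sqrt(d_model)

def output_logits(decoder_states):
    # the pre-softmax linear transformation reuses the same weights, transposed
    return decoder_states @ E.T
```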

\subsection{Positional Encoding}
Since our model contains no recurrence and no convolution, in order for the model to make use of the order of the sequence, we must inject some information about the relative or absolute position of the tokens in the sequence. To this end, we add "positional encodings" to the input embeddings at the bottoms of the encoder and decoder stacks. The positional encodings have the same dimension $\dmodel$ as the embeddings, so that the two can be summed. There are many choices of positional encodings, learned and fixed \citep{JonasFaceNet2017}.

In this work, we use sine and cosine functions of different frequencies:

\begin{align*}
    PE_{(pos,2i)} &= \sin(pos / 10000^{2i/\dmodel}) \\
    PE_{(pos,2i+1)} &= \cos(pos / 10000^{2i/\dmodel})
\end{align*}

where $pos$ is the position and $i$ is the dimension. That is, each dimension of the positional encoding corresponds to a sinusoid. The wavelengths form a geometric progression from $2\pi$ to $10000 \cdot 2\pi$. We chose this function because we hypothesized it would allow the model to easily learn to attend by relative positions, since for any fixed offset $k$, $PE_{pos+k}$ can be represented as a linear function of $PE_{pos}$.
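The sinusoidal table is straightforward to generate; a minimal sketch (our own code, following the formulas above):

```python
import numpy as np

def positional_encoding(max_len, d_model=512):
    pos = np.arange(max_len)[:, None]                 # (max_len, 1)
    i = np.arange(d_model // 2)[None, :]              # (1, d_model/2)
    angles = pos / np.power(10000.0, 2 * i / d_model)
    pe = np.zeros((max_len, d_model))
    pe[:, 0::2] = np.sin(angles)                      # even dimensions: sine
    pe[:, 1::2] = np.cos(angles)                      # odd dimensions: cosine
    return pe                                         # added to the input embeddings
```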

We also experimented with using learned positional embeddings \citep{JonasFaceNet2017} instead, and found that the two versions produced nearly identical results (see Table~\ref{tab:variations} row (E)). We chose the sinusoidal version because it may allow the model to extrapolate to sequence lengths longer than the ones encountered during training.
@@ -0,0 +1,45 @@

\pagebreak
\section*{Two Feed-Forward Layers = Attention over Parameters}\label{sec:parameter_attention}

In addition to attention layers, our model contains position-wise feed-forward networks (Section \ref{sec:ffn}), which consist of two linear transformations with a ReLU activation in between. In fact, these networks too can be seen as a form of attention. Compare the formula for such a network with the formula for a simple dot-product attention layer (biases and scaling factors omitted):

\begin{align*}
    \mathrm{FFN}(x, W_1, W_2) &= \mathrm{ReLU}(xW_1)W_2 \\
    A(q, K, V) &= \mathrm{Softmax}(qK^T)V
\end{align*}

Based on the similarity of these formulae, the two-layer feed-forward network can be seen as a kind of attention, where the keys and values are the rows of the trainable parameter matrices $W_1$ and $W_2$, and where we use ReLU instead of Softmax in the compatibility function.

%the compatibility function is $compat(q, k_i) = ReLU(q \cdot k_i)$ instead of $Softmax(qK^T)_i$.

Given this similarity, we experimented with replacing the position-wise feed-forward networks with attention layers similar to the ones we use everywhere else in our model. The multi-head-attention-over-parameters sublayer is identical to the multi-head attention described in Section~\ref{sec:multihead}, except that the "keys" and "values" inputs to each attention head are trainable model parameters, as opposed to being linear projections of a previous layer. These parameters are scaled up by a factor of $\sqrt{d_{model}}$ in order to be more similar to activations.
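A minimal single-head sketch of the attention-over-parameters idea, with trainable key/value tables instead of projections of the previous layer; the sizes follow the first experiment below, and the code is our reading of the description rather than the authors' implementation:

```python
import numpy as np

d_model, d_pk, d_pv, n_p = 512, 64, 64, 1536
rng = np.random.default_rng(0)
W_q = rng.normal(size=(d_model, d_pk)) * 0.02                 # query projection
K_param = rng.normal(size=(n_p, d_pk)) * np.sqrt(d_model)     # trainable "keys", scaled up
V_param = rng.normal(size=(n_p, d_pv)) * np.sqrt(d_model)     # trainable "values", scaled up
W_o = rng.normal(size=(d_pv, d_model)) * 0.02                 # output projection

def attention_over_parameters(x):
    # x: (seq_len, d_model); the keys and values do not depend on x at all
    q = x @ W_q
    scores = q @ K_param.T / np.sqrt(d_pk)
    w = np.exp(scores - scores.max(axis=-1, keepdims=True))
    w = w / w.sum(axis=-1, keepdims=True)
    return (w @ V_param) @ W_o
```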

In our first experiment, we replaced each position-wise feed-forward network with a multi-head-attention-over-parameters sublayer with $h_p=8$ heads, key-dimensionality $d_{pk}=64$, and value-dimensionality $d_{pv}=64$, using $n_p=1536$ key-value pairs for each attention head. The sublayer has a total of $2097152$ parameters, including the parameters in the query projection and the output projection. This matches the number of parameters in the position-wise feed-forward network that we replaced. While the theoretical amount of computation is also the same, in practice, the attention version caused the step times to be about 30\% longer.

In our second experiment, we used $h_p=16$ heads, and $n_p=512$ key-value pairs for each attention head, again matching the total number of parameters in the base model.

Results for the first experiment were slightly worse than for the base model, and results for the second experiment were slightly better, see Table~\ref{tab:parameter_attention}.

\begin{table}[h]
\caption{Replacing the position-wise feed-forward networks with multihead-attention-over-parameters produces similar results to the base model. All metrics are on the English-to-German translation development set, newstest2013.}
\label{tab:parameter_attention}
\begin{center}
\vspace{-2mm}
%\scalebox{1.0}{
\begin{tabular}{c|cccccc|cccc}
\hline\rule{0pt}{2.0ex}
 & \multirow{2}{*}{$\dmodel$} & \multirow{2}{*}{$\dff$} &
\multirow{2}{*}{$h_p$} & \multirow{2}{*}{$d_{pk}$} & \multirow{2}{*}{$d_{pv}$} &
\multirow{2}{*}{$n_p$} &
PPL & BLEU & params & training\\
 & & & & & & & (dev) & (dev) & $\times10^6$ & time \\
\hline\rule{0pt}{2.0ex}
base & 512 & 2048 & & & & & 4.92 & 25.8 & 65 & 12 hours\\
\hline\rule{0pt}{2.0ex}
AOP$_1$ & 512 & & 8 & 64 & 64 & 1536 & 4.92 & 25.5 & 65 & 16 hours\\
AOP$_2$ & 512 & & 16 & 64 & 64 & 512 & \textbf{4.86} & \textbf{25.9} & 65 & 16 hours \\
\hline
\end{tabular}
%}
\end{center}
\end{table}
@@ -0,0 +1,8 @@

The ancestor of ChatGPT: "Attention Is All You Need"

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin

The original abstract is as follows:

The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.

https://arxiv.org/abs/1706.03762
@@ -0,0 +1,2 @@

from stable_baselines3.dqn.dqn import DQN
from stable_baselines3.dqn.policies import CnnPolicy, MlpPolicy
@@ -0,0 +1,245 @@
|
|||||||
|
from typing import Any, Dict, List, Optional, Tuple, Type, Union
|
||||||
|
|
||||||
|
import gym
|
||||||
|
import numpy as np
|
||||||
|
import torch as th
|
||||||
|
from torch.nn import functional as F
|
||||||
|
|
||||||
|
from stable_baselines3.common import logger
|
||||||
|
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
|
||||||
|
from stable_baselines3.common.preprocessing import maybe_transpose
|
||||||
|
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
|
||||||
|
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
|
||||||
|
from stable_baselines3.dqn.policies import DQNPolicy
|
||||||
|
|
||||||
|
|
||||||
|
class DQN(OffPolicyAlgorithm):
|
||||||
|
"""
|
||||||
|
Deep Q-Network (DQN)
|
||||||
|
|
||||||
|
Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
|
||||||
|
Default hyperparameters are taken from the nature paper,
|
||||||
|
except for the optimizer and learning rate that were taken from Stable Baselines defaults.
|
||||||
|
|
||||||
|
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
|
||||||
|
:param env: The environment to learn from (if registered in Gym, can be str)
|
||||||
|
:param learning_rate: The learning rate, it can be a function
|
||||||
|
of the current progress remaining (from 1 to 0)
|
||||||
|
:param buffer_size: size of the replay buffer
|
||||||
|
:param learning_starts: how many steps of the model to collect transitions for before learning starts
|
||||||
|
:param batch_size: Minibatch size for each gradient update
|
||||||
|
:param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
|
||||||
|
:param gamma: the discount factor
|
||||||
|
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
|
||||||
|
like ``(5, "step")`` or ``(2, "episode")``.
|
||||||
|
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
|
||||||
|
Set to ``-1`` means to do as many gradient steps as steps done in the environment
|
||||||
|
during the rollout.
|
||||||
|
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
|
||||||
|
at a cost of more complexity.
|
||||||
|
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
|
||||||
|
:param target_update_interval: update the target network every ``target_update_interval``
|
||||||
|
environment steps.
|
||||||
|
:param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
|
||||||
|
:param exploration_initial_eps: initial value of random action probability
|
||||||
|
:param exploration_final_eps: final value of random action probability
|
||||||
|
:param max_grad_norm: The maximum value for the gradient clipping
|
||||||
|
:param tensorboard_log: the log location for tensorboard (if None, no logging)
|
||||||
|
:param create_eval_env: Whether to create a second environment that will be
|
||||||
|
used for evaluating the agent periodically. (Only available when passing string for the environment)
|
||||||
|
:param policy_kwargs: additional arguments to be passed to the policy on creation
|
||||||
|
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
|
||||||
|
:param seed: Seed for the pseudo random generators
|
||||||
|
:param device: Device (cpu, cuda, ...) on which the code should be run.
|
||||||
|
Setting it to auto, the code will be run on the GPU if possible.
|
||||||
|
:param _init_setup_model: Whether or not to build the network at the creation of the instance
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
policy: Union[str, Type[DQNPolicy]],
|
||||||
|
env: Union[GymEnv, str],
|
||||||
|
learning_rate: Union[float, Schedule] = 1e-4,
|
||||||
|
buffer_size: int = 1000000,
|
||||||
|
learning_starts: int = 50000,
|
||||||
|
batch_size: Optional[int] = 32,
|
||||||
|
tau: float = 1.0,
|
||||||
|
gamma: float = 0.99,
|
||||||
|
train_freq: Union[int, Tuple[int, str]] = 4,
|
||||||
|
gradient_steps: int = 1,
|
||||||
|
optimize_memory_usage: bool = False,
|
||||||
|
target_update_interval: int = 10000,
|
||||||
|
exploration_fraction: float = 0.1,
|
||||||
|
exploration_initial_eps: float = 1.0,
|
||||||
|
exploration_final_eps: float = 0.05,
|
||||||
|
max_grad_norm: float = 10,
|
||||||
|
tensorboard_log: Optional[str] = None,
|
||||||
|
create_eval_env: bool = False,
|
||||||
|
policy_kwargs: Optional[Dict[str, Any]] = None,
|
||||||
|
verbose: int = 0,
|
||||||
|
seed: Optional[int] = None,
|
||||||
|
device: Union[th.device, str] = "auto",
|
||||||
|
_init_setup_model: bool = True,
|
||||||
|
):
|
||||||
|
|
||||||
|
super(DQN, self).__init__(
|
||||||
|
policy,
|
||||||
|
env,
|
||||||
|
DQNPolicy,
|
||||||
|
learning_rate,
|
||||||
|
buffer_size,
|
||||||
|
learning_starts,
|
||||||
|
batch_size,
|
||||||
|
tau,
|
||||||
|
gamma,
|
||||||
|
train_freq,
|
||||||
|
gradient_steps,
|
||||||
|
action_noise=None, # No action noise
|
||||||
|
policy_kwargs=policy_kwargs,
|
||||||
|
tensorboard_log=tensorboard_log,
|
||||||
|
verbose=verbose,
|
||||||
|
device=device,
|
||||||
|
create_eval_env=create_eval_env,
|
||||||
|
seed=seed,
|
||||||
|
sde_support=False,
|
||||||
|
optimize_memory_usage=optimize_memory_usage,
|
||||||
|
supported_action_spaces=(gym.spaces.Discrete,),
|
||||||
|
)
|
||||||
|
|
||||||
|
self.exploration_initial_eps = exploration_initial_eps
|
||||||
|
self.exploration_final_eps = exploration_final_eps
|
||||||
|
self.exploration_fraction = exploration_fraction
|
||||||
|
self.target_update_interval = target_update_interval
|
||||||
|
self.max_grad_norm = max_grad_norm
|
||||||
|
# "epsilon" for the epsilon-greedy exploration
|
||||||
|
self.exploration_rate = 0.0
|
||||||
|
# Linear schedule will be defined in `_setup_model()`
|
||||||
|
self.exploration_schedule = None
|
||||||
|
self.q_net, self.q_net_target = None, None
|
||||||
|
|
||||||
|
if _init_setup_model:
|
||||||
|
self._setup_model()
|
||||||
|
|
||||||
|
def _setup_model(self) -> None:
|
||||||
|
super(DQN, self)._setup_model()
|
||||||
|
self._create_aliases()
|
||||||
|
self.exploration_schedule = get_linear_fn(
|
||||||
|
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
|
||||||
|
)
|
||||||
|
|
||||||
|
def _create_aliases(self) -> None:
|
||||||
|
self.q_net = self.policy.q_net
|
||||||
|
self.q_net_target = self.policy.q_net_target
|
||||||
|
|
||||||
|
def _on_step(self) -> None:
|
||||||
|
"""
|
||||||
|
Update the exploration rate and target network if needed.
|
||||||
|
This method is called in ``collect_rollouts()`` after each step in the environment.
|
||||||
|
"""
|
||||||
|
if self.num_timesteps % self.target_update_interval == 0:
|
||||||
|
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
|
||||||
|
|
||||||
|
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
|
||||||
|
logger.record("rollout/exploration rate", self.exploration_rate)
|
||||||
|
|
||||||
|
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
|
||||||
|
# Update learning rate according to schedule
|
||||||
|
self._update_learning_rate(self.policy.optimizer)
|
||||||
|
|
||||||
|
losses = []
|
||||||
|
for _ in range(gradient_steps):
|
||||||
|
# Sample replay buffer
|
||||||
|
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
|
||||||
|
|
||||||
|
with th.no_grad():
|
||||||
|
# Compute the next Q-values using the target network
|
||||||
|
next_q_values = self.q_net_target(replay_data.next_observations)
|
||||||
|
# Follow greedy policy: use the one with the highest value
|
||||||
|
next_q_values, _ = next_q_values.max(dim=1)
|
||||||
|
# Avoid potential broadcast issue
|
||||||
|
next_q_values = next_q_values.reshape(-1, 1)
|
||||||
|
# 1-step TD target
|
||||||
|
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
|
||||||
|
|
||||||
|
# Get current Q-values estimates
|
||||||
|
current_q_values = self.q_net(replay_data.observations)
|
||||||
|
|
||||||
|
# Retrieve the q-values for the actions from the replay buffer
|
||||||
|
current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
|
||||||
|
|
||||||
|
# Compute Huber loss (less sensitive to outliers)
|
||||||
|
loss = F.smooth_l1_loss(current_q_values, target_q_values)
|
||||||
|
losses.append(loss.item())
|
||||||
|
|
||||||
|
# Optimize the policy
|
||||||
|
self.policy.optimizer.zero_grad()
|
||||||
|
loss.backward()
|
||||||
|
# Clip gradient norm
|
||||||
|
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
|
||||||
|
self.policy.optimizer.step()
|
||||||
|
|
||||||
|
# Increase update counter
|
||||||
|
self._n_updates += gradient_steps
|
||||||
|
|
||||||
|
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
|
||||||
|
logger.record("train/loss", np.mean(losses))
|
||||||
|
|
||||||
|
def predict(
|
||||||
|
self,
|
||||||
|
observation: np.ndarray,
|
||||||
|
state: Optional[np.ndarray] = None,
|
||||||
|
mask: Optional[np.ndarray] = None,
|
||||||
|
deterministic: bool = False,
|
||||||
|
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
|
||||||
|
"""
|
||||||
|
Overrides the base_class predict function to include epsilon-greedy exploration.
|
||||||
|
|
||||||
|
:param observation: the input observation
|
||||||
|
:param state: The last states (can be None, used in recurrent policies)
|
||||||
|
:param mask: The last masks (can be None, used in recurrent policies)
|
||||||
|
:param deterministic: Whether or not to return deterministic actions.
|
||||||
|
:return: the model's action and the next state
|
||||||
|
(used in recurrent policies)
|
||||||
|
"""
|
||||||
|
if not deterministic and np.random.rand() < self.exploration_rate:
|
||||||
|
if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
|
||||||
|
n_batch = observation.shape[0]
|
||||||
|
action = np.array([self.action_space.sample() for _ in range(n_batch)])
|
||||||
|
else:
|
||||||
|
action = np.array(self.action_space.sample())
|
||||||
|
else:
|
||||||
|
action, state = self.policy.predict(observation, state, mask, deterministic)
|
||||||
|
return action, state
|
||||||
|
|
||||||
|
def learn(
|
||||||
|
self,
|
||||||
|
total_timesteps: int,
|
||||||
|
callback: MaybeCallback = None,
|
||||||
|
log_interval: int = 4,
|
||||||
|
eval_env: Optional[GymEnv] = None,
|
||||||
|
eval_freq: int = -1,
|
||||||
|
n_eval_episodes: int = 5,
|
||||||
|
tb_log_name: str = "DQN",
|
||||||
|
eval_log_path: Optional[str] = None,
|
||||||
|
reset_num_timesteps: bool = True,
|
||||||
|
) -> OffPolicyAlgorithm:
|
||||||
|
|
||||||
|
return super(DQN, self).learn(
|
||||||
|
total_timesteps=total_timesteps,
|
||||||
|
callback=callback,
|
||||||
|
log_interval=log_interval,
|
||||||
|
eval_env=eval_env,
|
||||||
|
eval_freq=eval_freq,
|
||||||
|
n_eval_episodes=n_eval_episodes,
|
||||||
|
tb_log_name=tb_log_name,
|
||||||
|
eval_log_path=eval_log_path,
|
||||||
|
reset_num_timesteps=reset_num_timesteps,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _excluded_save_params(self) -> List[str]:
|
||||||
|
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
|
||||||
|
|
||||||
|
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
|
||||||
|
state_dicts = ["policy", "policy.optimizer"]
|
||||||
|
|
||||||
|
return state_dicts, []
|
||||||
@@ -0,0 +1,237 @@
|
|||||||
|
from typing import Any, Dict, List, Optional, Type
|
||||||
|
|
||||||
|
import gym
|
||||||
|
import torch as th
|
||||||
|
from torch import nn
|
||||||
|
|
||||||
|
from stable_baselines3.common.policies import BasePolicy, register_policy
|
||||||
|
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
|
||||||
|
from stable_baselines3.common.type_aliases import Schedule
|
||||||
|
|
||||||
|
|
||||||
|
class QNetwork(BasePolicy):
|
||||||
|
"""
|
||||||
|
Action-Value (Q-Value) network for DQN
|
||||||
|
|
||||||
|
:param observation_space: Observation space
|
||||||
|
:param action_space: Action space
|
||||||
|
:param net_arch: The specification of the policy and value networks.
|
||||||
|
:param activation_fn: Activation function
|
||||||
|
:param normalize_images: Whether to normalize images or not,
|
||||||
|
dividing by 255.0 (True by default)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
observation_space: gym.spaces.Space,
|
||||||
|
action_space: gym.spaces.Space,
|
||||||
|
features_extractor: nn.Module,
|
||||||
|
features_dim: int,
|
||||||
|
net_arch: Optional[List[int]] = None,
|
||||||
|
activation_fn: Type[nn.Module] = nn.ReLU,
|
||||||
|
normalize_images: bool = True,
|
||||||
|
):
|
||||||
|
super(QNetwork, self).__init__(
|
||||||
|
observation_space,
|
||||||
|
action_space,
|
||||||
|
features_extractor=features_extractor,
|
||||||
|
normalize_images=normalize_images,
|
||||||
|
)
|
||||||
|
|
||||||
|
if net_arch is None:
|
||||||
|
net_arch = [64, 64]
|
||||||
|
|
||||||
|
self.net_arch = net_arch
|
||||||
|
self.activation_fn = activation_fn
|
||||||
|
self.features_extractor = features_extractor
|
||||||
|
self.features_dim = features_dim
|
||||||
|
self.normalize_images = normalize_images
|
||||||
|
action_dim = self.action_space.n # number of actions
|
||||||
|
q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
|
||||||
|
self.q_net = nn.Sequential(*q_net)
|
||||||
|
|
||||||
|
def forward(self, obs: th.Tensor) -> th.Tensor:
|
||||||
|
"""
|
||||||
|
Predict the q-values.
|
||||||
|
|
||||||
|
:param obs: Observation
|
||||||
|
:return: The estimated Q-Value for each action.
|
||||||
|
"""
|
||||||
|
return self.q_net(self.extract_features(obs))
|
||||||
|
|
||||||
|
def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
|
||||||
|
q_values = self.forward(observation)
|
||||||
|
# Greedy action
|
||||||
|
action = q_values.argmax(dim=1).reshape(-1)
|
||||||
|
return action
|
||||||
|
|
||||||
|
def _get_constructor_parameters(self) -> Dict[str, Any]:
|
||||||
|
data = super()._get_constructor_parameters()
|
||||||
|
|
||||||
|
data.update(
|
||||||
|
dict(
|
||||||
|
net_arch=self.net_arch,
|
||||||
|
features_dim=self.features_dim,
|
||||||
|
activation_fn=self.activation_fn,
|
||||||
|
features_extractor=self.features_extractor,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
class DQNPolicy(BasePolicy):
|
||||||
|
"""
|
||||||
|
Policy class with Q-Value Net and target net for DQN
|
||||||
|
|
||||||
|
:param observation_space: Observation space
|
||||||
|
:param action_space: Action space
|
||||||
|
:param lr_schedule: Learning rate schedule (could be constant)
|
||||||
|
:param net_arch: The specification of the policy and value networks.
|
||||||
|
:param activation_fn: Activation function
|
||||||
|
:param features_extractor_class: Features extractor to use.
|
||||||
|
:param features_extractor_kwargs: Keyword arguments
|
||||||
|
to pass to the features extractor.
|
||||||
|
:param normalize_images: Whether to normalize images or not,
|
||||||
|
dividing by 255.0 (True by default)
|
||||||
|
:param optimizer_class: The optimizer to use,
|
||||||
|
``th.optim.Adam`` by default
|
||||||
|
:param optimizer_kwargs: Additional keyword arguments,
|
||||||
|
excluding the learning rate, to pass to the optimizer
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
observation_space: gym.spaces.Space,
|
||||||
|
action_space: gym.spaces.Space,
|
||||||
|
lr_schedule: Schedule,
|
||||||
|
net_arch: Optional[List[int]] = None,
|
||||||
|
activation_fn: Type[nn.Module] = nn.ReLU,
|
||||||
|
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
|
||||||
|
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
|
||||||
|
normalize_images: bool = True,
|
||||||
|
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
|
||||||
|
optimizer_kwargs: Optional[Dict[str, Any]] = None,
|
||||||
|
):
|
||||||
|
super(DQNPolicy, self).__init__(
|
||||||
|
observation_space,
|
||||||
|
action_space,
|
||||||
|
features_extractor_class,
|
||||||
|
features_extractor_kwargs,
|
||||||
|
optimizer_class=optimizer_class,
|
||||||
|
optimizer_kwargs=optimizer_kwargs,
|
||||||
|
)
|
||||||
|
|
||||||
|
if net_arch is None:
|
||||||
|
if features_extractor_class == FlattenExtractor:
|
||||||
|
net_arch = [64, 64]
|
||||||
|
else:
|
||||||
|
net_arch = []
|
||||||
|
|
||||||
|
self.net_arch = net_arch
|
||||||
|
self.activation_fn = activation_fn
|
||||||
|
self.normalize_images = normalize_images
|
||||||
|
|
||||||
|
self.net_args = {
|
||||||
|
"observation_space": self.observation_space,
|
||||||
|
"action_space": self.action_space,
|
||||||
|
"net_arch": self.net_arch,
|
||||||
|
"activation_fn": self.activation_fn,
|
||||||
|
"normalize_images": normalize_images,
|
||||||
|
}
|
||||||
|
|
||||||
|
self.q_net, self.q_net_target = None, None
|
||||||
|
self._build(lr_schedule)
|
||||||
|
|
||||||
|
def _build(self, lr_schedule: Schedule) -> None:
|
||||||
|
"""
|
||||||
|
Create the network and the optimizer.
|
||||||
|
|
||||||
|
:param lr_schedule: Learning rate schedule
|
||||||
|
lr_schedule(1) is the initial learning rate
|
||||||
|
"""
|
||||||
|
|
||||||
|
self.q_net = self.make_q_net()
|
||||||
|
self.q_net_target = self.make_q_net()
|
||||||
|
self.q_net_target.load_state_dict(self.q_net.state_dict())
|
||||||
|
|
||||||
|
# Setup optimizer with initial learning rate
|
||||||
|
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
|
||||||
|
|
||||||
|
def make_q_net(self) -> QNetwork:
|
||||||
|
# Make sure we always have separate networks for features extractors etc
|
||||||
|
net_args = self._update_features_extractor(self.net_args, features_extractor=None)
|
||||||
|
return QNetwork(**net_args).to(self.device)
|
||||||
|
|
||||||
|
def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
|
||||||
|
return self._predict(obs, deterministic=deterministic)
|
||||||
|
|
||||||
|
def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
|
||||||
|
return self.q_net._predict(obs, deterministic=deterministic)
|
||||||
|
|
||||||
|
def _get_constructor_parameters(self) -> Dict[str, Any]:
|
||||||
|
data = super()._get_constructor_parameters()
|
||||||
|
|
||||||
|
data.update(
|
||||||
|
dict(
|
||||||
|
net_arch=self.net_args["net_arch"],
|
||||||
|
activation_fn=self.net_args["activation_fn"],
|
||||||
|
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
|
||||||
|
optimizer_class=self.optimizer_class,
|
||||||
|
optimizer_kwargs=self.optimizer_kwargs,
|
||||||
|
features_extractor_class=self.features_extractor_class,
|
||||||
|
features_extractor_kwargs=self.features_extractor_kwargs,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
MlpPolicy = DQNPolicy
|
||||||
|
|
||||||
|
|
||||||
|
class CnnPolicy(DQNPolicy):
|
||||||
|
"""
|
||||||
|
Policy class for DQN when using images as input.
|
||||||
|
|
||||||
|
:param observation_space: Observation space
|
||||||
|
:param action_space: Action space
|
||||||
|
:param lr_schedule: Learning rate schedule (could be constant)
|
||||||
|
:param net_arch: The specification of the policy and value networks.
|
||||||
|
:param activation_fn: Activation function
|
||||||
|
:param features_extractor_class: Features extractor to use.
|
||||||
|
:param normalize_images: Whether to normalize images or not,
|
||||||
|
dividing by 255.0 (True by default)
|
||||||
|
:param optimizer_class: The optimizer to use,
|
||||||
|
``th.optim.Adam`` by default
|
||||||
|
:param optimizer_kwargs: Additional keyword arguments,
|
||||||
|
excluding the learning rate, to pass to the optimizer
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
observation_space: gym.spaces.Space,
|
||||||
|
action_space: gym.spaces.Space,
|
||||||
|
lr_schedule: Schedule,
|
||||||
|
net_arch: Optional[List[int]] = None,
|
||||||
|
activation_fn: Type[nn.Module] = nn.ReLU,
|
||||||
|
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
|
||||||
|
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
|
||||||
|
normalize_images: bool = True,
|
||||||
|
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
|
||||||
|
optimizer_kwargs: Optional[Dict[str, Any]] = None,
|
||||||
|
):
|
||||||
|
super(CnnPolicy, self).__init__(
|
||||||
|
observation_space,
|
||||||
|
action_space,
|
||||||
|
lr_schedule,
|
||||||
|
net_arch,
|
||||||
|
activation_fn,
|
||||||
|
features_extractor_class,
|
||||||
|
features_extractor_kwargs,
|
||||||
|
normalize_images,
|
||||||
|
optimizer_class,
|
||||||
|
optimizer_kwargs,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
register_policy("MlpPolicy", MlpPolicy)
|
||||||
|
register_policy("CnnPolicy", CnnPolicy)
|
||||||
@@ -0,0 +1,2 @@

GitHub: stable-baselines3
https://github.com/DLR-RM/stable-baselines3
@@ -0,0 +1,27 @@

"In practice, we found that a high-entropy initial state is more likely to increase the speed of training.
The entropy is calculated by:
$$H=-\sum_{k=1}^{n_k} p(k) \cdot \log p(k), \quad p(k)=\frac{|A_k|}{|\mathcal{A}|}$$
where $H$ is the entropy, $|A_k|$ is the number of agent nodes in the $k$-th cluster, and $|\mathcal{A}|$ is the total number of agents.
To ensure the Cooperation Graph initialization has higher entropy,
we randomly generate multiple initial states,
rank them by their entropy, and then pick the one with maximum $H$."
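A small sketch of the described selection procedure; this is our own illustration, and the number of candidates `n_candidates` and the flat cluster-assignment representation are assumptions:

```python
import numpy as np

def cluster_entropy(assignment, n_clusters):
    # assignment[i] = index of the cluster that agent i belongs to
    counts = np.bincount(assignment, minlength=n_clusters)
    p = counts / counts.sum()
    p = p[p > 0]                       # ignore empty clusters (0 * log 0 := 0)
    return float(-(p * np.log(p)).sum())

def pick_high_entropy_init(n_agents, n_clusters, n_candidates=32, seed=0):
    # generate several random initial states and keep the one with maximum entropy H
    rng = np.random.default_rng(seed)
    candidates = [rng.integers(0, n_clusters, size=n_agents) for _ in range(n_candidates)]
    return max(candidates, key=lambda a: cluster_entropy(a, n_clusters))
```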
```
FROM ubuntu:latest

RUN apt-get update && \
    apt-get install -y python3 python3-pip && \
    rm -rf /var/lib/apt/lists/*

RUN echo '[global]' > /etc/pip.conf && \
    echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
    echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf

RUN pip3 install gradio requests[socks] mdtex2html

COPY . /gpt
WORKDIR /gpt

CMD ["python3", "main.py"]
```
138  crazy_functions/代码重写为全英文_多线程.py  (regular file)
@@ -0,0 +1,138 @@

import threading
from request_llm.bridge_all import predict_no_ui_long_connection
from toolbox import update_ui
from toolbox import CatchException, write_results_to_file, report_execption
from .crazy_utils import breakdown_txt_to_satisfy_token_limit

def extract_code_block_carefully(txt):
    splitted = txt.split('```')
    n_code_block_seg = len(splitted) - 1
    if n_code_block_seg <= 1: return txt
    # otherwise, strip one leading ``` and one trailing ```
    txt_out = '```'.join(splitted[1:-1])
    return txt_out


def break_txt_into_half_at_some_linebreak(txt):
    lines = txt.split('\n')
    n_lines = len(lines)
    pre = lines[:(n_lines//2)]
    post = lines[(n_lines//2):]
    return "\n".join(pre), "\n".join(post)


@CatchException
def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
    # Step 1: clear the history to avoid input overflow
    history = []

    # Step 2: try to import dependencies; if any are missing, suggest how to install them
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
            a=f"解析项目: {txt}",
            b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return

    # Step 3: collect the files
    import time, glob, os, shutil, re
    os.makedirs('gpt_log/generated_english_version', exist_ok=True)
    os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
    file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
                    [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
    # file_manifest = ['./toolbox.py']
    i_say_show_user_buffer = []

    # Step 4: display something so the UI does not feel stuck
    for index, fp in enumerate(file_manifest):
        # if 'test_project' in fp: continue
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
        i_say_show_user = f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}'
        i_say_show_user_buffer.append(i_say_show_user)
        chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

    # Step 5: truncation and processing under the token limit
    MAX_TOKEN = 3000
    from request_llm.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
    def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))

    # Step 6: the worker function executed by each thread
    mutable_return = [None for _ in file_manifest]
    observe_window = [[""] for _ in file_manifest]
    def thread_worker(fp, index):
        if index > 10:
            time.sleep(60)
            print('Openai 限制免费用户每分钟20次请求,降低请求频率中。')
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
        i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
        try:
            gpt_say = ""
            # split the code file into chunks that fit the token limit
            file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN)
            for file_content_partial in file_content_breakdown:
                i_say = i_say_template(fp, file_content_partial)
                # ** gpt request **
                gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
                gpt_say_partial = extract_code_block_carefully(gpt_say_partial)
                gpt_say += gpt_say_partial
            mutable_return[index] = gpt_say
        except ConnectionAbortedError as token_exceed_err:
            print('至少一个线程任务Token溢出而失败', token_exceed_err)
        except Exception as e:
            print('至少一个线程任务意外失败', e)

    # Step 7: start all threads at the same time
    handles = [threading.Thread(target=thread_worker, args=(fp, index)) for index, fp in enumerate(file_manifest)]
    for h in handles:
        h.daemon = True
        h.start()
    chatbot.append(('开始了吗?', f'多线程操作已经开始'))
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

    # Step 8: poll the threads until all of them have finished
    cnt = 0
    while True:
        cnt += 1
        time.sleep(0.2)
        th_alive = [h.is_alive() for h in handles]
        if not any(th_alive): break
        # nicer visual feedback in the UI
        observe_win = []
        for thread_index, alive in enumerate(th_alive):
            observe_win.append("[ ..."+observe_window[thread_index][0][-60:].replace('\n','').replace('```','...').replace(' ','.').replace('<br/>','.....').replace('$','.')+"... ]")
        stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)]
        stat_str = ''.join(stat)
        chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10 + 1)))
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

    # Step 9: write the results to files
    for index, h in enumerate(handles):
        h.join() # not strictly necessary here, every thread has already finished
        fp = file_manifest[index]
        gpt_say = mutable_return[index]
        i_say_show_user = i_say_show_user_buffer[index]

        where_to_relocate = f'gpt_log/generated_english_version/{fp}'
        if gpt_say is not None:
            with open(where_to_relocate, 'w+', encoding='utf-8') as f:
                f.write(gpt_say)
        else: # on failure, fall back to copying the original file unchanged
            shutil.copyfile(file_manifest[index], where_to_relocate)
        chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
        history.append(i_say_show_user); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        time.sleep(1)

    # Step 10: write a backup report file
    res = write_results_to_file(history)
    chatbot.append(("生成一份任务执行报告", res))
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
@@ -1,5 +1,5 @@
|
|||||||
import glob, shutil, os, re, logging
|
import glob, time, os, re, logging
|
||||||
from toolbox import update_ui, trimmed_format_exc, gen_time_str
|
from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion
|
||||||
from toolbox import CatchException, report_exception, get_log_folder
|
from toolbox import CatchException, report_exception, get_log_folder
|
||||||
from toolbox import write_history_to_file, promote_file_to_downloadzone
|
from toolbox import write_history_to_file, promote_file_to_downloadzone
|
||||||
fast_debug = False
|
fast_debug = False
|
||||||
@@ -18,7 +18,7 @@ class PaperFileGroup():
|
|||||||
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
||||||
self.get_token_num = get_token_num
|
self.get_token_num = get_token_num
|
||||||
|
|
||||||
def run_file_split(self, max_token_limit=2048):
|
def run_file_split(self, max_token_limit=1900):
|
||||||
"""
|
"""
|
||||||
将长文本分离开来
|
将长文本分离开来
|
||||||
"""
|
"""
|
||||||
@@ -64,22 +64,22 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
|
|||||||
pfg.file_contents.append(file_content)
|
pfg.file_contents.append(file_content)
|
||||||
|
|
||||||
# <-------- 拆分过长的Markdown文件 ---------->
|
# <-------- 拆分过长的Markdown文件 ---------->
|
||||||
pfg.run_file_split(max_token_limit=2048)
|
pfg.run_file_split(max_token_limit=1500)
|
||||||
n_split = len(pfg.sp_file_contents)
|
n_split = len(pfg.sp_file_contents)
|
||||||
|
|
||||||
# <-------- 多线程翻译开始 ---------->
|
# <-------- 多线程翻译开始 ---------->
|
||||||
if language == 'en->zh':
|
if language == 'en->zh':
|
||||||
inputs_array = ["This is a Markdown file, translate it into Chinese, do NOT modify any existing Markdown commands, do NOT use code wrapper (```), ONLY answer me with translated results:" +
|
inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" +
|
||||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||||
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
|
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
|
||||||
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
||||||
elif language == 'zh->en':
|
elif language == 'zh->en':
|
||||||
inputs_array = [f"This is a Markdown file, translate it into English, do NOT modify any existing Markdown commands, do NOT use code wrapper (```), ONLY answer me with translated results:" +
|
inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" +
|
||||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||||
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
|
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
|
||||||
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
||||||
else:
|
else:
|
||||||
inputs_array = [f"This is a Markdown file, translate it into {language}, do NOT modify any existing Markdown commands, do NOT use code wrapper (```), ONLY answer me with translated results:" +
|
inputs_array = [f"This is a Markdown file, translate it into {language}, do not modify any existing Markdown commands, only answer me with translated results:" +
|
||||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||||
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
|
inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
|
||||||
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
|
||||||
@@ -99,12 +99,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
|
|||||||
for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]):
|
for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]):
|
||||||
pfg.sp_file_result.append(gpt_say)
|
pfg.sp_file_result.append(gpt_say)
|
||||||
pfg.merge_result()
|
pfg.merge_result()
|
||||||
output_file_arr = pfg.write_result(language)
|
pfg.write_result(language)
|
||||||
for output_file in output_file_arr:
|
|
||||||
promote_file_to_downloadzone(output_file, chatbot=chatbot)
|
|
||||||
if 'markdown_expected_output_path' in plugin_kwargs:
|
|
||||||
expected_f_name = plugin_kwargs['markdown_expected_output_path']
|
|
||||||
shutil.copyfile(output_file, expected_f_name)
|
|
||||||
except:
|
except:
|
||||||
logging.error(trimmed_format_exc())
|
logging.error(trimmed_format_exc())
|
||||||
|
|
||||||
@@ -164,6 +159,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
|
|||||||
"函数插件功能?",
|
"函数插件功能?",
|
||||||
"对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
|
"对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
disable_auto_promotion(chatbot)
|
||||||
|
|
||||||
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
||||||
try:
|
try:
|
||||||
@@ -203,6 +199,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
|
|||||||
"函数插件功能?",
|
"函数插件功能?",
|
||||||
"对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
|
"对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
disable_auto_promotion(chatbot)
|
||||||
|
|
||||||
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
||||||
try:
|
try:
|
||||||
@@ -235,6 +232,7 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history,
|
|||||||
"函数插件功能?",
|
"函数插件功能?",
|
||||||
"对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
|
"对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
disable_auto_promotion(chatbot)
|
||||||
|
|
||||||
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -1,11 +1,9 @@
|
|||||||
from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages
|
from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages
|
||||||
from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
|
from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
|
||||||
from toolbox import write_history_to_file, promote_file_to_downloadzone, get_conf, extract_archive
|
from toolbox import write_history_to_file, promote_file_to_downloadzone
|
||||||
from toolbox import generate_file_link, zip_folder, trimmed_format_exc, trimmed_format_exc_markdown
|
|
||||||
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
||||||
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
|
||||||
from .crazy_utils import read_and_clean_pdf_text
|
from .crazy_utils import read_and_clean_pdf_text
|
||||||
from .crazy_utils import get_files_from_everything
|
|
||||||
from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf
|
from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf
|
||||||
from colorful import *
|
from colorful import *
|
||||||
import os
|
import os
|
||||||
@@ -16,7 +14,9 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
|
|||||||
|
|
||||||
disable_auto_promotion(chatbot)
|
disable_auto_promotion(chatbot)
|
||||||
# 基本信息:功能、贡献者
|
# 基本信息:功能、贡献者
|
||||||
chatbot.append([None, "插件功能:批量翻译PDF文档。函数插件贡献者: Binary-Husky"])
|
chatbot.append([
|
||||||
|
"函数插件功能?",
|
||||||
|
"批量翻译PDF文档。函数插件贡献者: Binary-Husky"])
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
|
||||||
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
||||||
@@ -32,6 +32,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
|
|||||||
# 清空历史,以免输入溢出
|
# 清空历史,以免输入溢出
|
||||||
history = []
|
history = []
|
||||||
|
|
||||||
|
from .crazy_utils import get_files_from_everything
|
||||||
success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf')
|
success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf')
|
||||||
# 检测输入参数,如没有给定输入参数,直接退出
|
# 检测输入参数,如没有给定输入参数,直接退出
|
||||||
if not success:
|
if not success:
|
||||||
@@ -45,162 +46,14 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
|
|||||||
return
|
return
|
||||||
|
|
||||||
# 开始正式执行任务
|
# 开始正式执行任务
|
||||||
DOC2X_API_KEY = get_conf("DOC2X_API_KEY")
|
|
||||||
# ------- 第一种方法,效果最好,但是需要DOC2X服务 -------
|
|
||||||
if len(DOC2X_API_KEY) != 0:
|
|
||||||
try:
|
|
||||||
yield from 解析PDF_DOC2X(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, DOC2X_API_KEY, user_request)
|
|
||||||
return
|
|
||||||
except:
|
|
||||||
chatbot.append([None, f"DOC2X服务不可用,现在将执行效果稍差的旧版代码。{trimmed_format_exc_markdown()}"])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
|
||||||
|
|
||||||
# ------- 第二种方法,效果次优 -------
|
|
||||||
grobid_url = get_avail_grobid_url()
|
grobid_url = get_avail_grobid_url()
|
||||||
if grobid_url is not None:
|
if grobid_url is not None:
|
||||||
yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
|
yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
|
||||||
return
|
else:
|
||||||
|
|
||||||
# ------- 第三种方法,早期代码,效果不理想 -------
|
|
||||||
yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
|
yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
|
||||||
yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def 解析PDF_DOC2X_单文件(fp, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, DOC2X_API_KEY, user_request):

    def pdf2markdown(filepath):
        import requests, json, os
        markdown_dir = get_log_folder(plugin_name="pdf_ocr")
        doc2x_api_key = DOC2X_API_KEY
        if doc2x_api_key.startswith('sk-'):
            url = "https://api.doc2x.noedgeai.com/api/v1/pdf"
        else:
            url = "https://api.doc2x.noedgeai.com/api/platform/pdf"

        chatbot.append((None, "加载PDF文件,发送至DOC2X解析..."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

        res = requests.post(
            url,
            files={"file": open(filepath, "rb")},
            data={"ocr": "1"},
            headers={"Authorization": "Bearer " + doc2x_api_key}
        )
        res_json = []
        if res.status_code == 200:
            decoded = res.content.decode("utf-8")
            for z_decoded in decoded.split('\n'):
                if len(z_decoded) == 0: continue
                assert z_decoded.startswith("data: ")
                z_decoded = z_decoded[len("data: "):]
                decoded_json = json.loads(z_decoded)
                res_json.append(decoded_json)
        else:
            raise RuntimeError(format("[ERROR] status code: %d, body: %s" % (res.status_code, res.text)))
        uuid = res_json[0]['uuid']
        to = "md"  # latex, md, docx
        url = "https://api.doc2x.noedgeai.com/api/export"+"?request_id="+uuid+"&to="+to

        chatbot.append((None, f"读取解析: {url} ..."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

        res = requests.get(url, headers={"Authorization": "Bearer " + doc2x_api_key})
        md_zip_path = os.path.join(markdown_dir, gen_time_str() + '.zip')
        if res.status_code == 200:
            with open(md_zip_path, "wb") as f: f.write(res.content)
        else:
            raise RuntimeError(format("[ERROR] status code: %d, body: %s" % (res.status_code, res.text)))
        promote_file_to_downloadzone(md_zip_path, chatbot=chatbot)
        chatbot.append((None, f"完成解析 {md_zip_path} ..."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return md_zip_path

    def deliver_to_markdown_plugin(md_zip_path, user_request):
        from crazy_functions.批量Markdown翻译 import Markdown英译中
        import shutil, re

        time_tag = gen_time_str()
        target_path_base = get_log_folder(chatbot.get_user())
        file_origin_name = os.path.basename(md_zip_path)
        this_file_path = os.path.join(target_path_base, file_origin_name)
        os.makedirs(target_path_base, exist_ok=True)
        shutil.copyfile(md_zip_path, this_file_path)
        ex_folder = this_file_path + ".extract"
        extract_archive(
            file_path=this_file_path, dest_dir=ex_folder
        )

        # edit markdown files
        success, file_manifest, project_folder = get_files_from_everything(ex_folder, type='.md')
        for generated_fp in file_manifest:
            # fix up a few formula issues
            with open(generated_fp, 'r', encoding='utf8') as f:
                content = f.read()
            # replace the \[ \] formula delimiters with $$
            content = content.replace(r'\[', r'$$').replace(r'\]', r'$$')
            # replace the \( \) formula delimiters with $
            content = content.replace(r'\(', r'$').replace(r'\)', r'$')
            content = content.replace('```markdown', '\n').replace('```', '\n')
            with open(generated_fp, 'w', encoding='utf8') as f:
                f.write(content)
            promote_file_to_downloadzone(generated_fp, chatbot=chatbot)
            yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

            # generate an online HTML preview
            file_name = '在线预览翻译(原文)' + gen_time_str() + '.html'
            preview_fp = os.path.join(ex_folder, file_name)
            from shared_utils.advanced_markdown_format import markdown_convertion_for_file
            with open(generated_fp, "r", encoding="utf-8") as f:
                md = f.read()
                # the Markdown uses non-standard tables; prepend an emoji before each table so formula rendering works
                md = re.sub(r'^<table>', r'😃<table>', md, flags=re.MULTILINE)
                html = markdown_convertion_for_file(md)
            with open(preview_fp, "w", encoding="utf-8") as f: f.write(html)
            chatbot.append([None, f"生成在线预览:{generate_file_link([preview_fp])}"])
            promote_file_to_downloadzone(preview_fp, chatbot=chatbot)

        chatbot.append((None, f"调用Markdown插件 {ex_folder} ..."))
        plugin_kwargs['markdown_expected_output_dir'] = ex_folder

        translated_f_name = 'translated_markdown.md'
        generated_fp = plugin_kwargs['markdown_expected_output_path'] = os.path.join(ex_folder, translated_f_name)
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        yield from Markdown英译中(ex_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
        if os.path.exists(generated_fp):
            # fix up a few formula issues
            with open(generated_fp, 'r', encoding='utf8') as f: content = f.read()
            content = content.replace('```markdown', '\n').replace('```', '\n')
            # the Markdown uses non-standard tables; prepend an emoji before each table so formula rendering works
            content = re.sub(r'^<table>', r'😃<table>', content, flags=re.MULTILINE)
            with open(generated_fp, 'w', encoding='utf8') as f: f.write(content)
            # generate an online HTML preview
            file_name = '在线预览翻译' + gen_time_str() + '.html'
            preview_fp = os.path.join(ex_folder, file_name)
            from shared_utils.advanced_markdown_format import markdown_convertion_for_file
            with open(generated_fp, "r", encoding="utf-8") as f:
                md = f.read()
                html = markdown_convertion_for_file(md)
            with open(preview_fp, "w", encoding="utf-8") as f: f.write(html)
            promote_file_to_downloadzone(preview_fp, chatbot=chatbot)
            # build a zip archive that also contains the images
            dest_folder = get_log_folder(chatbot.get_user())
            zip_name = '翻译后的带图文档.zip'
            zip_folder(source_folder=ex_folder, dest_folder=dest_folder, zip_name=zip_name)
            zip_fp = os.path.join(dest_folder, zip_name)
            promote_file_to_downloadzone(zip_fp, chatbot=chatbot)
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    md_zip_path = yield from pdf2markdown(fp)
    yield from deliver_to_markdown_plugin(md_zip_path, user_request)
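As a quick illustration of the clean-up step in deliver_to_markdown_plugin above, the delimiter normalisation behaves as follows on a small sample string (the sample text is invented for illustration):

# illustrative only: effect of the formula-delimiter normalisation used above
content = r"Result: \[ E = mc^2 \] and inline \( a+b \)."
content = content.replace(r'\[', r'$$').replace(r'\]', r'$$')
content = content.replace(r'\(', r'$').replace(r'\)', r'$')
print(content)  # Result: $$ E = mc^2 $$ and inline $ a+b $.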
def 解析PDF_DOC2X(file_manifest, *args):
    for index, fp in enumerate(file_manifest):
        yield from 解析PDF_DOC2X_单文件(fp, *args)
    return
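For reference, the DOC2X round trip used by pdf2markdown above boils down to two HTTP calls. A condensed, self-contained sketch without the chatbot/UI plumbing (the output-path handling below is an assumption, not the project's own helper):

import json, requests

def doc2x_pdf_to_markdown_zip(filepath, api_key, out_zip="doc2x_result.zip"):
    base = "https://api.doc2x.noedgeai.com/api"
    url = base + ("/v1/pdf" if api_key.startswith("sk-") else "/platform/pdf")
    # 1) upload the PDF; the response is a stream of "data: {...}" JSON lines
    res = requests.post(url, files={"file": open(filepath, "rb")}, data={"ocr": "1"},
                        headers={"Authorization": "Bearer " + api_key})
    res.raise_for_status()
    events = [json.loads(line[len("data: "):])
              for line in res.content.decode("utf-8").split("\n") if line.startswith("data: ")]
    uuid = events[0]["uuid"]
    # 2) download the exported markdown as a zip archive
    res = requests.get(base + "/export?request_id=" + uuid + "&to=md",
                       headers={"Authorization": "Bearer " + api_key})
    res.raise_for_status()
    with open(out_zip, "wb") as f:
        f.write(res.content)
    return out_zip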
def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url):
    import copy, json
    TOKEN_LIMIT_PER_FRAGMENT = 1024
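The rest of 解析PDF_基于GROBID is not shown here, but TOKEN_LIMIT_PER_FRAGMENT hints at the next step: the extracted paper text is split into fragments small enough for the model. A minimal illustrative splitter, not the repository's own helper (the character-based token estimate is an assumption):

def split_into_fragments(text, token_limit=1024, estimate=lambda s: len(s) // 4):
    # Greedy split on paragraph boundaries; estimate() is a rough token estimate
    # (roughly 4 characters per token, an assumption made for illustration).
    fragments, current = [], ""
    for para in text.split("\n\n"):
        if current and estimate(current + "\n\n" + para) > token_limit:
            fragments.append(current)
            current = para
        else:
            current = current + "\n\n" + para if current else para
    if current:
        fragments.append(current)
    return fragments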
@@ -345,12 +345,9 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
     pattern_except_suffix = [_.lstrip(" ^*.,").rstrip(" ,") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^*.")]
     pattern_except_suffix += ['zip', 'rar', '7z', 'tar', 'gz'] # 避免解析压缩文件
     # 将要忽略匹配的文件名(例如: ^README.md)
-    pattern_except_name = [_.lstrip(" ^*,").rstrip(" ,").replace(".", r"\.")  # 移除左边通配符,移除右侧逗号,转义点号
-                           for _ in txt_pattern.split(" ")  # 以空格分割
-                           if (_ != "" and _.strip().startswith("^") and not _.strip().startswith("^*."))  # ^开始,但不是^*.开始
-                           ]
+    pattern_except_name = [_.lstrip(" ^*,").rstrip(" ,").replace(".", "\.") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^") and not _.strip().startswith("^*.")]
     # 生成正则表达式
-    pattern_except = r'/[^/]+\.(' + "|".join(pattern_except_suffix) + ')$'
+    pattern_except = '/[^/]+\.(' + "|".join(pattern_except_suffix) + ')$'
     pattern_except += '|/(' + "|".join(pattern_except_name) + ')$' if pattern_except_name != [] else ''

     history.clear()
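To make the effect of this exclusion pattern concrete, here is a small self-contained example mirroring the list comprehensions above (the sample txt_pattern is invented for illustration):

import re

txt_pattern = "^*.log ^README.md"  # illustrative user input
pattern_except_suffix = [_.lstrip(" ^*.,").rstrip(" ,") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^*.")]
pattern_except_suffix += ['zip', 'rar', '7z', 'tar', 'gz']
pattern_except_name = [_.lstrip(" ^*,").rstrip(" ,").replace(".", r"\.") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^") and not _.strip().startswith("^*.")]
pattern_except = r'/[^/]+\.(' + "|".join(pattern_except_suffix) + ')$'
pattern_except += '|/(' + "|".join(pattern_except_name) + ')$' if pattern_except_name != [] else ''
# pattern_except is now: /[^/]+\.(log|zip|rar|7z|tar|gz)$|/(README\.md)$
print(re.search(pattern_except, "/project/build/output.zip") is not None)  # True  -> excluded
print(re.search(pattern_except, "/project/README.md") is not None)         # True  -> excluded
print(re.search(pattern_except, "/project/main.py") is not None)           # False -> kept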
28	crazy_functions/辅助回答.py	普通文件
@@ -0,0 +1,28 @@
# encoding: utf-8
# @Time   : 2023/4/19
# @Author : Spike
# @Descr   :
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive


@CatchException
def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    if txt:
        show_say = txt
        prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
    else:
        prompt = history[-1]+"\n分析上述回答,再列出用户可能提出的三个问题。"
        show_say = '分析上述回答,再列出用户可能提出的三个问题。'
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=prompt,
        inputs_show_user=show_say,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history=history,
        sys_prompt=system_prompt
    )
    chatbot[-1] = (show_say, gpt_say)
    history.extend([show_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
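A small sketch of how the two branches above assemble the prompt, runnable on its own (the sample question and history are illustrative; registering the plugin itself happens elsewhere, in crazy_functional.py, which is not shown in this diff):

def build_prompt(txt, history):
    # Mirrors the branching in 猜你想问: a fresh question gets a suffix asking for
    # three likely follow-up questions; otherwise the last answer in history is analysed.
    if txt:
        show_say = txt
        prompt = txt + '\n回答完问题后,再列出用户可能提出的三个问题。'
    else:
        prompt = history[-1] + "\n分析上述回答,再列出用户可能提出的三个问题。"
        show_say = '分析上述回答,再列出用户可能提出的三个问题。'
    return prompt, show_say

print(build_prompt("什么是Transformer?", [])[1])                 # shows the user's own question
print(build_prompt("", ["Q", "Transformer是一种注意力模型..."])[1])  # shows the follow-up request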
@@ -4,9 +4,9 @@
 # 1. 请在以下方案中选择任意一种,然后删除其他的方案
 # 2. 修改你选择的方案中的environment环境变量,详情请见github wiki或者config.py
 # 3. 选择一种暴露服务端口的方法,并对相应的配置做出修改:
-#    「方法1: 适用于Linux,很方便,可惜windows不支持」与宿主的网络融合为一体,这个是默认配置
+#    【方法1: 适用于Linux,很方便,可惜windows不支持】与宿主的网络融合为一体,这个是默认配置
 #    network_mode: "host"
-#    「方法2: 适用于所有系统包括Windows和MacOS」端口映射,把容器的端口映射到宿主的端口(注意您需要先删除network_mode: "host",再追加以下内容)
+#    【方法2: 适用于所有系统包括Windows和MacOS】端口映射,把容器的端口映射到宿主的端口(注意您需要先删除network_mode: "host",再追加以下内容)
 #    ports:
 #      - "12345:12345"  # 注意!12345必须与WEB_PORT环境变量相互对应
 # 4. 最后`docker-compose up`运行

@@ -25,7 +25,7 @@
 ## ===================================================

 ## ===================================================
-## 「方案零」 部署项目的全部能力(这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡,则不推荐使用这个)
+## 【方案零】 部署项目的全部能力(这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡,则不推荐使用这个)
 ## ===================================================
 version: '3'
 services:

@@ -63,10 +63,10 @@ services:
 #      count: 1
 #      capabilities: [gpu]

-    # 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
+    # 【WEB_PORT暴露方法1: 适用于Linux】与宿主的网络融合
     network_mode: "host"

-    # 「WEB_PORT暴露方法2: 适用于所有系统」端口映射
+    # 【WEB_PORT暴露方法2: 适用于所有系统】端口映射
     # ports:
     #   - "12345:12345"  # 12345必须与WEB_PORT相互对应

@@ -75,8 +75,10 @@ services:
       bash -c "python3 -u main.py"
+
+


 ## ===================================================
-## 「方案一」 如果不需要运行本地模型(仅 chatgpt, azure, 星火, 千帆, claude 等在线大模型服务)
+## 【方案一】 如果不需要运行本地模型(仅 chatgpt, azure, 星火, 千帆, claude 等在线大模型服务)
 ## ===================================================
 version: '3'
 services:

@@ -95,16 +97,16 @@ services:
 #      DEFAULT_WORKER_NUM: ' 10 '
 #      AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '

-    # 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
+    # 与宿主的网络融合
     network_mode: "host"

-    # 启动命令
+    # 不使用代理网络拉取最新代码
     command: >
       bash -c "python3 -u main.py"


 ### ===================================================
-### 「方案二」 如果需要运行ChatGLM + Qwen + MOSS等本地模型
+### 【方案二】 如果需要运行ChatGLM + Qwen + MOSS等本地模型
 ### ===================================================
 version: '3'
 services:

@@ -128,10 +130,8 @@ services:
     devices:
       - /dev/nvidia0:/dev/nvidia0

-    # 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
+    # 与宿主的网络融合
     network_mode: "host"

-    # 启动命令
     command: >
       bash -c "python3 -u main.py"
-

@@ -139,9 +139,8 @@ services:
 #    command: >
 #      bash -c "pip install -r request_llms/requirements_qwen.txt && python3 -u main.py"
-

 ### ===================================================
-### 「方案三」 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型
+### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型
 ### ===================================================
 version: '3'
 services:

@@ -165,16 +164,16 @@ services:
     devices:
       - /dev/nvidia0:/dev/nvidia0

-    # 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
+    # 与宿主的网络融合
     network_mode: "host"

-    # 启动命令
+    # 不使用代理网络拉取最新代码
     command: >
       python3 -u main.py


 ## ===================================================
-## 「方案四」 ChatGPT + Latex
+## 【方案四】 ChatGPT + Latex
 ## ===================================================
 version: '3'
 services:

@@ -191,16 +190,16 @@ services:
     DEFAULT_WORKER_NUM: ' 10 '
     WEB_PORT: ' 12303 '

-    # 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
+    # 与宿主的网络融合
     network_mode: "host"

-    # 启动命令
+    # 不使用代理网络拉取最新代码
     command: >
       bash -c "python3 -u main.py"


 ## ===================================================
-## 「方案五」 ChatGPT + 语音助手 (请先阅读 docs/use_audio.md)
+## 【方案五】 ChatGPT + 语音助手 (请先阅读 docs/use_audio.md)
 ## ===================================================
 version: '3'
 services:

@@ -224,9 +223,9 @@ services:
 # (无需填写) ALIYUN_ACCESSKEY: ' LTAI5q6BrFUzoRXVGUWnekh1 '
 # (无需填写) ALIYUN_SECRET: ' eHmI20AVWIaQZ0CiTD2bGQVsaP9i68 '

-    # 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
+    # 与宿主的网络融合
     network_mode: "host"

-    # 启动命令
+    # 不使用代理网络拉取最新代码
     command: >
       bash -c "python3 -u main.py"
@@ -10,6 +10,9 @@ ENV PATH "$PATH:/usr/local/texlive/2024/bin/x86_64-linux"
 ENV PATH "$PATH:/usr/local/texlive/2025/bin/x86_64-linux"
 ENV PATH "$PATH:/usr/local/texlive/2026/bin/x86_64-linux"

+# 删除文档文件以节约空间
+RUN rm -rf /usr/local/texlive/2023/texmf-dist/doc
+
 # 指定路径
 WORKDIR /gpt

307	docs/README.md.German.md	普通文件
@@ -0,0 +1,307 @@
> **Hinweis**
>
> Bei der Installation von Abhängigkeiten sollten nur die in **requirements.txt** **angegebenen Versionen** streng ausgewählt werden.
>
> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`

# <img src="docs/logo.png" width="40" > GPT Akademisch optimiert (GPT Academic)

**Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Stern; wenn Sie bessere Tastenkombinationen oder Funktions-Plugins entwickelt haben, können Sie gerne einen Pull Request eröffnen.**

Wenn Sie dieses Projekt mögen, geben Sie ihm bitte einen Stern. Wenn Sie weitere nützliche wissenschaftliche Abkürzungen oder funktionale Plugins entwickelt haben, können Sie gerne ein Problem oder eine Pull-Anforderung öffnen. Wir haben auch ein README in [Englisch|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md), das von diesem Projekt selbst übersetzt wurde.
Um dieses Projekt in eine beliebige Sprache mit GPT zu übersetzen, lesen Sie `multi_language.py` (experimentell).

> **Hinweis**
>
> 1. Beachten Sie bitte, dass nur Funktionserweiterungen (Schaltflächen) mit **roter Farbe** Dateien lesen können und einige Erweiterungen im **Dropdown-Menü** des Erweiterungsbereichs zu finden sind. Außerdem begrüßen wir jede neue Funktionserweiterung mit **höchster Priorität** und bearbeiten sie.
>
> 2. Die Funktionalität jeder Datei in diesem Projekt wird in der Selbstanalyse [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) detailliert beschrieben. Mit der Weiterentwicklung der Versionen können Sie jederzeit die zugehörigen Funktions-Erweiterungen aufrufen, um durch Aufruf von GPT einen Selbstanalysebericht des Projekts zu erstellen. Häufig gestellte Fragen finden Sie in der [`Wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installationsanweisungen](#Installation).
>
> 3. Dieses Projekt ist kompatibel und fördert die Verwendung von inländischen Sprachmodellen wie ChatGLM und RWKV, Pangu, etc. Es unterstützt das Vorhandensein mehrerer api-keys, die in der Konfigurationsdatei wie folgt angegeben werden können: `API_KEY="openai-key1,openai-key2,api2d-key3"`. Wenn ein `API_KEY` temporär geändert werden muss, geben Sie den temporären `API_KEY` im Eingabebereich ein und drücken Sie dann die Eingabetaste, um ihn zu übernehmen.

Funktion | Beschreibung
--- | ---
Ein-Klick-Polieren | Unterstützt ein-Klick-Polieren und ein-Klick-Suche nach grammatikalischen Fehlern in wissenschaftlichen Arbeiten
Ein-Klick Chinesisch-Englisch Übersetzung | Ein-Klick Chinesisch-Englisch Übersetzung
Ein-Klick-Code-Erklärung | Zeigt Code, erklärt Code, erzeugt Code und fügt Kommentare zum Code hinzu
[Benutzerdefinierte Tastenkombinationen](https://www.bilibili.com/video/BV14s4y1E7jN) | Unterstützt benutzerdefinierte Tastenkombinationen
Modulare Gestaltung | Unterstützt leistungsstarke individuelle [Funktions-Plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions). Plugins unterstützen [Hot-Updates](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[Selbstprogramm-Analyse](https://www.bilibili.com/video/BV1cj411A7VW) | [Funktions-Plugin] [Ein-Klick Verstehen](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) der Quellcode dieses Projekts
[Programmanalyse](https://www.bilibili.com/video/BV1cj411A7VW) | [Funktions-Plugin] Ein-Klick-Analyse des Projektbaums anderer Python/C/C++/Java/Lua/...-Projekte
Lesen von Papieren, [Übersetzen](https://www.bilibili.com/video/BV1KT411x7Wn) von Papieren | [Funktions-Plugin] Ein-Klick Erklärung des gesamten LaTeX/PDF-Artikels und Erstellung einer Zusammenfassung
LaTeX-Volltext-Übersetzung und [Polieren](https://www.bilibili.com/video/BV1FT411H7c5/) | [Funktions-Plugin] Ein-Klick-Übersetzung oder-Polieren des LaTeX-Artikels
Bulk-Kommentargenerierung | [Funktions-Plugin] Ein-Klick Massenerstellung von Funktionskommentaren
Markdown [Chinesisch-Englisch Übersetzung](https://www.bilibili.com/video/BV1yo4y157jV/) | [Funktions-Plugin] Haben Sie die [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in den oben genannten 5 Sprachen gesehen?
Analyse-Berichtserstellung von chat | [Funktions-Plugin] Automatische Zusammenfassung nach der Ausführung
[Funktion zur vollständigen Übersetzung von PDF-Artikeln](https://www.bilibili.com/video/BV1KT411x7Wn) | [Funktions-Plugin] Extrahiert Titel und Zusammenfassung der PDF-Artikel und übersetzt den gesamten Text (mehrere Threads)
[Arxiv-Assistent](https://www.bilibili.com/video/BV1LM4y1279X) | [Funktions-Plugin] Geben Sie die Arxiv-Artikel-URL ein und klicken Sie auf Eine-Klick-Übersetzung-Zusammenfassung + PDF-Download
[Google Scholar Integrations-Assistent](https://www.bilibili.com/video/BV19L411U7ia) | [Funktions-Plugin] Geben Sie eine beliebige Google Scholar Such-URL ein und lassen Sie gpt Ihnen bei der Erstellung von [relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/) helfen
Internet-Informationen Aggregation + GPT | [Funktions-Plugin] Lassen Sie GPT eine Frage beantworten, indem es [zuerst Informationen aus dem Internet](https://www.bilibili.com/video/BV1om4y127ck/) sammelt und so die Informationen nie veralten
Anzeige von Formeln / Bildern / Tabellen | Zeigt Formeln in beiden Formen, [TeX-Format und gerendeter Form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), unterstützt Formeln und Code-Highlights
Unterstützung von PlugIns mit mehreren Threads | Unterstützt den Aufruf mehrerer Threads in Chatgpt, um Text oder Programme [Batch zu verarbeiten](https://www.bilibili.com/video/BV1FT411H7c5/)
Starten Sie das dunkle Gradio-[Thema](https://github.com/binary-husky/gpt_academic/issues/173) | Fügen Sie ```/?__theme=dark``` an das Ende der Browser-URL an, um das dunkle Thema zu aktivieren
[Unterstützung für mehrere LLM-Modelle](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) Interface-Unterstützung | Das Gefühl, gleichzeitig von GPT3.5, GPT4, [Tshinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS) bedient zu werden, muss toll sein, oder?
Zugriff auf weitere LLM-Modelle, Unterstützung von [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Hinzufügen der Newbing-Schnittstelle (neues Bing), Einführung der Unterstützung von [Jittorllms](https://github.com/Jittor/JittorLLMs) der Tsinghua-Universität, [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) und [Pangu alpha](https://openi.org.cn/pangu/)
Weitere neue Funktionen (wie Bildgenerierung) …… | Siehe Ende dieses Dokuments ……

- Neue Oberfläche (Ändern Sie die LAYOUT-Option in `config.py`, um zwischen "Seitenlayout" und "Oben-unten-Layout" zu wechseln)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
</div>

- All buttons are dynamically generated by reading `functional.py`, and custom functions can be easily added, freeing up the clipboard.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Proofreading/Correcting
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- If the output contains formulas, they will be displayed in both tex format and rendered format for easy copying and reading.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- Don't feel like reading the project code? Show off the entire project to chatgpt.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Multiple large language models are mixed and called together (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4).
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

---
# Installation
## Installation-Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project
```sh
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. Configure API_KEY

Configure API KEY and other settings in `config.py`. [Special Network Environment Settings](https://github.com/binary-husky/gpt_academic/issues/1).

(P.S. When the program is running, it will first check whether there is a "config_private.py" private configuration file, and use the configuration defined in it to override the configuration of "config.py". Therefore, if you understand our configuration reading logic, we strongly recommend that you create a new configuration file named "config_private.py" next to "config.py" and transfer (copy) the configurations in "config.py" to "config_private.py". "config_private.py" is not controlled by git, which can make your privacy information more secure. P.S. The project also supports configuring most options through `environment variables`, and the writing format of environment variables refers to the `docker-compose` file. Reading priority: `environment variable` > `config_private.py` > `config.py`)

3. Install dependencies
```sh
# (Option I: If familar with Python) (Python version 3.9 or above, the newer the better), Note: Use the official pip source or Ali pip source, temporary switching method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (Option II: If not familiar with Python) Use anaconda with similar steps (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11    # Create an anaconda environment
conda activate gptac_venv                 # Activate the anaconda environment
python -m pip install -r requirements.txt # Same step as pip installation
```

<details><summary>Click to expand if supporting Tsinghua ChatGLM/Fudan MOSS as backend</summary>
<p>

[Optional Step] If supporting Tsinghua ChatGLM/Fudan MOSS as backend, additional dependencies need to be installed (Prerequisites: Familiar with Python + Used Pytorch + Sufficient computer configuration):
```sh
# [Optional Step I] Support Tsinghua ChatGLM. Remark: If encountering "Call ChatGLM fail Cannot load ChatGLM parameters", please refer to the following: 1: The above default installation is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient machine configuration, you can modify the model precision in `request_llm/bridge_chatglm.py`, and modify all AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt

# [Optional Step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # When executing this line of code, you must be in the project root path

# [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently supported models are as follows (jittorllms series currently only supports docker solutions):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```

</p>
</details>

4. Run
```sh
python main.py
```

5. Testing Function Plugin
```
- Test function plugin template function (requires gpt to answer what happened today in history), you can use this function as a template to implement more complex functions
Click "[Function Plugin Template Demo] Today in History"
```

## Installation-Method 2: Using Docker

1. Only ChatGPT (Recommended for most people)

``` sh
git clone https://github.com/binary-husky/gpt_academic.git  # Download the project
cd gpt_academic                                             # Enter the path
nano config.py                                              # Edit config.py with any text editor, Configure "Proxy","API_KEY"and"WEB_PORT" (e.g 50923) etc.
docker build -t gpt-academic .                              # Install

# (Last step-option 1) Under Linux environment, use `--net=host` is more convenient and quick
docker run --rm -it --net=host gpt-academic
# (Last step-option 2) Under macOS/windows environment, can only use the -p option to expose the container's port(eg.50923) to the port on the host.
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

2. ChatGPT + ChatGLM + MOSS (Requires familiarity with Docker)

``` sh
# Modify docker-compose.yml, delete solution 1 and solution 3, and retain solution 2. Modify the configuration of solution 2 in docker-compose.yml, referring to the comments in it.
docker-compose up
```

3. ChatGPT + LLAMA + Pangu + RWKV (Requires familiarity with Docker)
``` sh
# Modify docker-compose.yml, delete solution 1 and solution 2, and retain solution 3. Modify the configuration of solution 3 in docker-compose.yml, referring to the comments in it.
docker-compose up
```

## Installation-Method 3: Other Deployment Options

1. How to use reverse proxy URL/Microsoft Azure API
Configure API_URL_REDIRECT according to the instructions in `config.py`.

2. Remote cloud server deployment (requires cloud server knowledge and experience)
Please visit [Deployment wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

3. Using WSL 2 (Windows subsystem for Linux)
Please visit [Deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

4. How to run at a secondary URL (such as `http://localhost/subpath`)
Please visit [FastAPI operating instructions](docs/WithFastapi.md)

5. Use docker-compose to run
Please read docker-compose.yml and follow the prompts to operate.

---
# Advanced Usage
## Customize new convenience buttons / custom function plugins.

1. Customize new convenience buttons (Academic Shortcut Keys)
Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has been added successfully and is visible, then the prefix and suffix can be hot-modified, and it will take effect without restarting the program.)
For example
```
"Super English to Chinese": {
    # Prefix, will be added before your input. For example, used to describe your requirements, such as translation, explaining code, polishing, etc.
    "Prefix": "Please translate the following content into Chinese, and then use a markdown table to explain the proper nouns that appear in the text one by one:\n\n",

    # Suffix, will be added after your input. For example, combined with prefix, you can enclose your input content in quotes.
    "Suffix": "",
},
```
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

2. Custom function plugins

Write powerful function plugins to perform any task you want and can't think of.
The difficulty of plugin writing and debugging is very low in this project. As long as you have a certain knowledge of Python, you can implement your own plugin functions by imitating the template we provided.
For more information, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).

---
# Latest Update
## New feature dynamics

1. Funktion zur Speicherung von Dialogen. Rufen Sie im Bereich der Funktions-Plugins "Aktuellen Dialog speichern" auf, um den aktuellen Dialog als lesbares und wiederherstellbares HTML-Datei zu speichern. Darüber hinaus können Sie im Funktions-Plugin-Bereich (Dropdown-Menü) "Laden von Dialogverlauf" aufrufen, um den vorherigen Dialog wiederherzustellen. Tipp: Wenn Sie keine Datei angeben und stattdessen direkt auf "Laden des Dialogverlaufs" klicken, können Sie das HTML-Cache-Archiv anzeigen. Durch Klicken auf "Löschen aller lokalen Dialogverlaufsdatensätze" können alle HTML-Archiv-Caches gelöscht werden.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
</div>

2. Berichterstellung. Die meisten Plugins generieren nach Abschluss der Ausführung einen Arbeitsbericht.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

3. Modularisierte Funktionsgestaltung, einfache Schnittstellen mit leistungsstarken Funktionen.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>

4. Dies ist ein Open-Source-Projekt, das sich "selbst übersetzen" kann.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
</div>

5. Die Übersetzung anderer Open-Source-Projekte ist kein Problem.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
</div>

6. Dekorieren Sie [`live2d`](https://github.com/fghrsh/live2d_demo) mit kleinen Funktionen (standardmäßig deaktiviert, Änderungen an `config.py` erforderlich).
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
</div>

7. Neue MOSS-Sprachmodellunterstützung.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
</div>

8. OpenAI-Bildgenerierung.
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>

9. OpenAI-Audio-Analyse und Zusammenfassung.
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
</div>

10. Latex-Proofreading des gesamten Textes.
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
</div>

## Version:
- Version 3.5 (Todo): Rufen Sie alle Funktionserweiterungen dieses Projekts mit natürlicher Sprache auf (hohe Priorität).
- Version 3.4 (Todo): Verbesserte Unterstützung mehrerer Threads für Local Large Model (LLM).
- Version 3.3: + Internet-Informationssynthese-Funktion
- Version 3.2: Funktionserweiterungen unterstützen mehr Parameter-Schnittstellen (Speicherung von Dialogen, Interpretation beliebigen Sprachcodes + gleichzeitige Abfrage jeder LLM-Kombination)
- Version 3.1: Unterstützung mehrerer GPT-Modelle gleichzeitig! Unterstützung für API2D, Unterstützung für Lastenausgleich von mehreren API-Schlüsseln.
- Version 3.0: Unterstützung von Chatglm und anderen kleinen LLMs
- Version 2.6: Umstrukturierung der Plugin-Struktur zur Verbesserung der Interaktivität, Einführung weiterer Plugins
- Version 2.5: Automatische Aktualisierung, Problembehebung bei Quelltexten großer Projekte, wenn der Text zu lang ist oder Token überlaufen.
- Version 2.4: (1) Neue Funktion zur Übersetzung des gesamten PDF-Texts; (2) Neue Funktion zum Wechseln der Position des Eingabebereichs; (3) Neue Option für vertikales Layout; (4) Optimierung von Multithread-Funktions-Plugins.
- Version 2.3: Verbesserte Interaktivität mit mehreren Threads
- Version 2.2: Funktionserweiterungen unterstützen "Hot-Reload"
- Version 2.1: Faltbares Layout
- Version 2.0: Einführung von modularisierten Funktionserweiterungen
- Version 1.0: Grundlegende Funktionen

gpt_academic Entwickler QQ-Gruppe-2: 610599535

- Bekannte Probleme
  - Einige Browser-Übersetzungs-Plugins können die Frontend-Ausführung dieser Software stören.
  - Sowohl eine zu hohe als auch eine zu niedrige Version von Gradio führt zu verschiedenen Ausnahmen.

## Referenz und Lernen

```
Der Code bezieht sich auf viele Designs von anderen herausragenden Projekten, insbesondere:

# Projekt 1: ChatGLM-6B der Tsinghua Universität:
https://github.com/THUDM/ChatGLM-6B

# Projekt 2: JittorLLMs der Tsinghua Universität:
https://github.com/Jittor/JittorLLMs

# Projekt 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Projekt 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Projekt 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# Mehr:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```
316
docs/README.md.Italian.md
普通文件
316
docs/README.md.Italian.md
普通文件
@@ -0,0 +1,316 @@
|
|||||||
|
> **Nota**
|
||||||
|
>
|
||||||
|
> Durante l'installazione delle dipendenze, selezionare rigorosamente le **versioni specificate** nel file requirements.txt.
|
||||||
|
>
|
||||||
|
> ` pip install -r requirements.txt`
|
||||||
|
|
||||||
|
# <img src="logo.png" width="40" > GPT Ottimizzazione Accademica (GPT Academic)
|
||||||
|
|
||||||
|
**Se ti piace questo progetto, ti preghiamo di dargli una stella. Se hai sviluppato scorciatoie accademiche o plugin funzionali più utili, non esitare ad aprire una issue o pull request. Abbiamo anche una README in [Inglese|](README_EN.md)[Giapponese|](README_JP.md)[Coreano|](https://github.com/mldljyh/ko_gpt_academic)[Russo|](README_RS.md)[Francese](README_FR.md) tradotta da questo stesso progetto.
|
||||||
|
Per tradurre questo progetto in qualsiasi lingua con GPT, leggere e eseguire [`multi_language.py`](multi_language.py) (sperimentale).
|
||||||
|
|
||||||
|
> **Nota**
|
||||||
|
>
|
||||||
|
> 1. Si prega di notare che solo i plugin (pulsanti) contrassegnati in **rosso** supportano la lettura di file, alcuni plugin sono posizionati nel **menu a discesa** nella zona dei plugin. Accettiamo e gestiamo PR per qualsiasi nuovo plugin con **massima priorità**!
|
||||||
|
>
|
||||||
|
> 2. Le funzionalità di ogni file di questo progetto sono descritte dettagliatamente nella propria analisi di autotraduzione [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Con l'iterazione delle versioni, è possibile fare clic sui plugin funzionali correlati in qualsiasi momento per richiamare GPT e generare nuovamente il rapporto di analisi automatica del progetto. Le domande frequenti sono riassunte nella [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Metodo di installazione] (#installazione).
|
||||||
|
>
|
||||||
|
> 3. Questo progetto è compatibile e incoraggia l'utilizzo di grandi modelli di linguaggio di produzione nazionale come chatglm, RWKV, Pangu ecc. Supporta la coesistenza di più api-key e può essere compilato nel file di configurazione come `API_KEY="openai-key1,openai-key2,api2d-key3"`. Per sostituire temporaneamente `API_KEY`, inserire `API_KEY` temporaneo nell'area di input e premere Invio per renderlo effettivo.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
Funzione | Descrizione
|
||||||
|
--- | ---
|
||||||
|
Correzione immediata | Supporta correzione immediata e ricerca degli errori di grammatica del documento con un solo clic
|
||||||
|
Traduzione cinese-inglese immediata | Traduzione cinese-inglese immediata con un solo clic
|
||||||
|
Spiegazione del codice immediata | Visualizzazione del codice, spiegazione del codice, generazione del codice, annotazione del codice con un solo clic
|
||||||
|
[Scorciatoie personalizzate](https://www.bilibili.com/video/BV14s4y1E7jN) | Supporta scorciatoie personalizzate
|
||||||
|
Design modularizzato | Supporta potenti [plugin di funzioni](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions) personalizzati, i plugin supportano l'[aggiornamento in tempo reale](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
|
||||||
|
[Auto-profiling del programma](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin di funzioni] [Comprensione immediata](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) del codice sorgente di questo progetto
|
||||||
|
[Analisi del programma](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin di funzioni] Un clic può analizzare l'albero di altri progetti Python/C/C++/Java/Lua/...
|
||||||
|
Lettura del documento, [traduzione](https://www.bilibili.com/video/BV1KT411x7Wn) del documento | [Plugin di funzioni] La lettura immediata dell'intero documento latex/pdf di un documento e la generazione di un riassunto
|
||||||
|
Traduzione completa di un documento Latex, [correzione immediata](https://www.bilibili.com/video/BV1FT411H7c5/) | [Plugin di funzioni] Una traduzione o correzione immediata di un documento Latex
|
||||||
|
Generazione di annotazioni in batch | [Plugin di funzioni] Generazione automatica delle annotazioni di funzione con un solo clic
|
||||||
|
[Traduzione cinese-inglese di Markdown](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin di funzioni] Hai letto il [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) delle cinque lingue sopra?
|
||||||
|
Generazione di report di analisi di chat | [Plugin di funzioni] Generazione automatica di un rapporto di sintesi dopo l'esecuzione
|
||||||
|
[Funzione di traduzione di tutto il documento PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin di funzioni] Estrarre il titolo e il sommario dell'articolo PDF + tradurre l'intero testo (multithreading)
|
||||||
|
[Assistente di Arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugin di funzioni] Inserire l'URL dell'articolo di Arxiv e tradurre il sommario con un clic + scaricare il PDF
|
||||||
|
[Assistente integrato di Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Plugin di funzioni] Con qualsiasi URL di pagina di ricerca di Google Scholar, lascia che GPT ti aiuti a scrivere il tuo [relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
|
||||||
|
Aggregazione delle informazioni su Internet + GPT | [Plugin di funzioni] Fai in modo che GPT rilevi le informazioni su Internet prima di rispondere alle domande, senza mai diventare obsolete
|
||||||
|
Visualizzazione di formule/img/tabelle | È possibile visualizzare un'equazione in forma [tex e render](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) contemporaneamente, supporta equazioni e evidenziazione del codice
|
||||||
|
Supporto per plugin di funzioni multithreading | Supporto per chiamata multithreaded di chatgpt, elaborazione con un clic di grandi quantità di testo o di un programma
|
||||||
|
Avvia il tema di gradio [scuro](https://github.com/binary-husky/gpt_academic/issues/173) | Aggiungere ```/?__theme=dark``` dopo l'URL del browser per passare a un tema scuro
|
||||||
|
Supporto per maggiori modelli LLM, supporto API2D | Sentirsi serviti simultaneamente da GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS) deve essere una grande sensazione, giusto?
|
||||||
|
Ulteriori modelli LLM supportat,i supporto per l'implementazione di Huggingface | Aggiunta di un'interfaccia Newbing (Nuovo Bing), introdotta la compatibilità con Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) e [PanGu-α](https://openi.org.cn/pangu/)
|
||||||
|
Ulteriori dimostrazioni di nuove funzionalità (generazione di immagini, ecc.)... | Vedere la fine di questo documento...
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
- Nuova interfaccia (modificare l'opzione LAYOUT in `config.py` per passare dal layout a sinistra e a destra al layout superiore e inferiore)
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||||
|
</div>Sei un traduttore professionista di paper accademici.
|
||||||
|
|
||||||
|
- Tutti i pulsanti vengono generati dinamicamente leggendo il file functional.py, e aggiungerci nuove funzionalità è facile, liberando la clipboard.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Revisione/Correzione
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Se l'output contiene una formula, viene visualizzata sia come testo che come formula renderizzata, per facilitare la copia e la visualizzazione.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Non hai tempo di leggere il codice del progetto? Passa direttamente a chatgpt e chiedi informazioni.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Chiamata mista di vari modelli di lingua di grandi dimensioni (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
---
|
||||||
|
# Installazione
|
||||||
|
## Installazione - Metodo 1: Esecuzione diretta (Windows, Linux o MacOS)
|
||||||
|
|
||||||
|
1. Scarica il progetto
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git
|
||||||
|
cd gpt_academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Configura API_KEY
|
||||||
|
|
||||||
|
In `config.py`, configura la tua API KEY e altre impostazioni, [configs for special network environments](https://github.com/binary-husky/gpt_academic/issues/1).
|
||||||
|
|
||||||
|
(N.B. Quando il programma viene eseguito, verifica prima se esiste un file di configurazione privato chiamato `config_private.py` e sovrascrive le stesse configurazioni in `config.py`. Pertanto, se capisci come funziona la nostra logica di lettura della configurazione, ti consigliamo vivamente di creare un nuovo file di configurazione chiamato `config_private.py` accanto a `config.py`, e spostare (copiare) le configurazioni di `config.py` in `config_private.py`. 'config_private.py' non è sotto la gestione di git e può proteggere ulteriormente le tue informazioni personali. NB Il progetto supporta anche la configurazione della maggior parte delle opzioni tramite "variabili d'ambiente". La sintassi della variabile d'ambiente è descritta nel file `docker-compose`. Priorità di lettura: "variabili d'ambiente" > "config_private.py" > "config.py")
|
||||||
|
|
||||||
|
|
||||||
|
3. Installa le dipendenze
|
||||||
|
```sh
|
||||||
|
# (Scelta I: se sei familiare con python) (python 3.9 o superiore, più nuovo è meglio), N.B.: utilizza il repository ufficiale pip o l'aliyun pip repository, metodo temporaneo per cambiare il repository: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||||
|
python -m pip install -r requirements.txt
|
||||||
|
|
||||||
|
# (Scelta II: se non conosci Python) utilizza anaconda, il processo è simile (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||||
|
conda create -n gptac_venv python=3.11 # crea l'ambiente anaconda
|
||||||
|
conda activate gptac_venv # attiva l'ambiente anaconda
|
||||||
|
python -m pip install -r requirements.txt # questo passaggio funziona allo stesso modo dell'installazione con pip
|
||||||
|
```
|
||||||
|
|
||||||
|
<details><summary>Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, fare clic qui per espandere</summary>
|
||||||
|
<p>
|
||||||
|
|
||||||
|
【Passaggio facoltativo】 Se si desidera supportare ChatGLM di Tsinghua/MOSS di Fudan come backend, è necessario installare ulteriori dipendenze (prerequisiti: conoscenza di Python, esperienza con Pytorch e computer sufficientemente potente):
|
||||||
|
```sh
|
||||||
|
# 【Passaggio facoltativo I】 Supporto a ChatGLM di Tsinghua. Note su ChatGLM di Tsinghua: in caso di errore "Call ChatGLM fail 不能正常加载ChatGLM的参数" , fare quanto segue: 1. Per impostazione predefinita, viene installata la versione di torch + cpu; per usare CUDA, è necessario disinstallare torch e installare nuovamente torch + cuda; 2. Se non è possibile caricare il modello a causa di una configurazione insufficiente del computer, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, cambiando AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) in AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||||
|
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||||
|
|
||||||
|
# 【Passaggio facoltativo II】 Supporto a MOSS di Fudan
|
||||||
|
python -m pip install -r request_llm/requirements_moss.txt
|
||||||
|
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto
|
||||||
|
|
||||||
|
# 【Passaggio facoltativo III】 Assicurati che il file di configurazione config.py includa tutti i modelli desiderati, al momento tutti i modelli supportati sono i seguenti (i modelli della serie jittorllms attualmente supportano solo la soluzione docker):
|
||||||
|
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||||
|
```
|
||||||
|
|
||||||
|
</p>
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
4. Esegui
|
||||||
|
```sh
|
||||||
|
python main.py
|
||||||
|
```5. Plugin di test delle funzioni
|
||||||
|
```
|
||||||
|
- Funzione plugin di test (richiede una risposta gpt su cosa è successo oggi in passato), puoi utilizzare questa funzione come template per implementare funzionalità più complesse
|
||||||
|
Clicca su "[Demo del plugin di funzione] Oggi nella storia"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installazione - Metodo 2: Utilizzo di Docker
|
||||||
|
|
||||||
|
1. Solo ChatGPT (consigliato per la maggior parte delle persone)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git # scarica il progetto
|
||||||
|
cd gpt_academic # entra nel percorso
|
||||||
|
nano config.py # con un qualsiasi editor di testo, modifica config.py configurando "Proxy", "API_KEY" e "WEB_PORT" (ad esempio 50923)
|
||||||
|
docker build -t gpt-academic . # installa
|
||||||
|
|
||||||
|
#(ultimo passaggio - selezione 1) In un ambiente Linux, utilizzare '--net=host' è più conveniente e veloce
|
||||||
|
docker run --rm -it --net=host gpt-academic
|
||||||
|
#(ultimo passaggio - selezione 2) In un ambiente MacOS/Windows, l'opzione -p può essere utilizzata per esporre la porta del contenitore (ad es. 50923) alla porta della macchina
|
||||||
|
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. ChatGPT + ChatGLM + MOSS (richiede familiarità con Docker)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
# Modifica docker-compose.yml, elimina i piani 1 e 3, mantieni il piano 2. Modifica la configurazione del piano 2 in docker-compose.yml, si prega di fare riferimento alle relative annotazioni
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
3. ChatGPT + LLAMA + Pangu + RWKV (richiede familiarità con Docker)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
# Modifica docker-compose.yml, elimina i piani 1 e 2, mantieni il piano 3. Modifica la configurazione del piano 3 in docker-compose.yml, si prega di fare riferimento alle relative annotazioni
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Installazione - Metodo 3: Altre modalità di distribuzione
|
||||||
|
|
||||||
|
1. Come utilizzare un URL di reindirizzamento / AzureAPI Cloud Microsoft
|
||||||
|
Configura API_URL_REDIRECT seguendo le istruzioni nel file `config.py`.
|
||||||
|
|
||||||
|
2. Distribuzione su un server cloud remoto (richiede conoscenze ed esperienza di server cloud)
|
||||||
|
Si prega di visitare [wiki di distribuzione-1] (https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||||
|
|
||||||
|
3. Utilizzo di WSL2 (Windows Subsystem for Linux)
|
||||||
|
Si prega di visitare [wiki di distribuzione-2] (https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||||
|
|
||||||
|
4. Come far funzionare ChatGPT all'interno di un sottodominio (ad es. `http://localhost/subpath`)
|
||||||
|
Si prega di visitare [Istruzioni per l'esecuzione con FastAPI] (docs/WithFastapi.md)
|
||||||
|
|
||||||
|
5. Utilizzo di docker-compose per l'esecuzione
|
||||||
|
Si prega di leggere il file docker-compose.yml e seguire le istruzioni fornite.
|
||||||
|
|
||||||
|
---
|
||||||
|
# Uso avanzato
|
||||||
|
## Personalizzazione dei pulsanti / Plugin di funzione personalizzati
|
||||||
|
|
||||||
|
1. Personalizzazione dei pulsanti (scorciatoie accademiche)
|
||||||
|
Apri `core_functional.py` con qualsiasi editor di testo e aggiungi la voce seguente, quindi riavvia il programma (se il pulsante è già stato aggiunto con successo e visibile, il prefisso e il suffisso supportano la modifica in tempo reale, senza bisogno di riavviare il programma).

For example:
```
"超级英译中": {
    # Prefix: it will be added before your input. For example, use it to describe your request, such as translating, explaining code, proofreading, etc.
    "Prefix": "Please translate the following text into Chinese, and then explain every technical term that appears in it using a markdown table:\n\n",

    # Suffix: it will be added after your input. For example, together with the prefix it can wrap your input in quotation marks.
    "Suffix": "",
},
```

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

2. Custom function plugins

Write custom function plugins to carry out any task you want, including tasks you had never thought of.
Writing and debugging plugins in this project has a low barrier to entry: with some basic Python knowledge, you can implement your own plugin by following the template we provide. For details, see the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
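
As a purely illustrative sketch (the function name, signature, and flow below are hypothetical and are not the project's real plugin API; the actual template is linked in the guide above), a plugin is essentially a Python generator that receives the user's input together with the chat history and streams updated chat states back to the UI:

```python
# Hypothetical, self-contained sketch of the plugin idea; the real templates
# under crazy_functions/ use a different signature and helper functions.
from typing import Generator, List, Tuple

ChatState = List[Tuple[str, str]]

def demo_plugin(user_input: str, history: ChatState) -> Generator[ChatState, None, None]:
    """Append an answer to the chat history, yielding intermediate UI states."""
    chatbot = list(history)
    chatbot.append((user_input, "Working on it..."))  # show progress first
    yield chatbot
    answer = f"You asked: {user_input!r}. A real plugin would call the LLM here."
    chatbot[-1] = (user_input, answer)                # replace with the final answer
    yield chatbot

if __name__ == "__main__":
    for state in demo_plugin("Summarize this repository", history=[]):
        print(state[-1][1])
```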

---
# Latest updates
## New feature highlights

1. Conversation saving. In the function plugin area, click "Save the current conversation" to save the current conversation as a readable and restorable HTML file. In addition, in the function plugin area (drop-down menu), click "Load an archived conversation history" to restore a previous conversation. Tip: clicking "Load an archived conversation history" without specifying a file lets you browse the cached HTML archives, and clicking "Delete all local conversation history records" deletes all cached HTML archives.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
</div>

2. Report generation. Most plugins generate a work report after they finish running.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

3. Modular function design: simple interfaces that nevertheless support powerful features.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>

4. This is an open-source project that can "translate itself".

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
</div>

5. Translating other open-source projects is straightforward.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
</div>

6. A small decorative feature based on [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default; enabling it requires editing `config.py`).

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
</div>

7. Support for the MOSS large language model

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
</div>

8. OpenAI image generation

<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>

9. OpenAI audio analysis and summarization

<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
</div>

10. Full-text LaTeX proofreading and correction

<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
</div>


## Versions:

- version 3.5 (todo): call all of this project's function plugins using natural language (high priority)
- version 3.4 (todo): multi-threading support for the local ChatGLM large language model
- version 3.3: + internet information aggregation feature
- version 3.2: function plugins support more parameter interfaces (conversation saving, reading code in any language + simultaneously querying any combination of LLMs)
- version 3.1: support for querying multiple GPT models at the same time! Support for api2d, load balancing across multiple API keys
- version 3.0: support for ChatGLM and other small LLMs
- version 2.6: plugin architecture reworked, improved interactivity, more plugins added
- version 2.5: self-updating; fixed the problem of overly long text and token overflow when summarizing large software projects
- version 2.4: (1) added full-document PDF translation; (2) added the option to swap the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
- version 2.3: improved multi-threaded interactivity
- version 2.2: function plugins support hot reloading
- version 2.1: collapsible layout
- version 2.0: introduction of modular function plugins
- version 1.0: basic features

gpt_academic developer QQ group 2: 610599535

- Known issues
    - Some browser translation extensions interfere with the front end of this software
    - A gradio version that is too new or too old can cause various malfunctions

## References and learning

```
The code references the designs of many other excellent projects, mainly:

# Project 1: Tsinghua ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```

270
docs/README.md.Korean.md
regular file
@@ -0,0 +1,270 @@

> **Note**
>
> When installing dependencies, please strictly select the versions **specified** in requirements.txt.
>
> `pip install -r requirements.txt`

# <img src="docs/logo.png" width="40" > GPT Academic Optimization (GPT Academic)

**If you like this project, please give it a Star. If you have come up with more useful academic shortcuts or function plugins, feel free to open an issue or a pull request. There are also READMEs for this project in [English |](docs/README_EN.md)[Japanese |](docs/README_JP.md)[Korean |](https://github.com/mldljyh/ko_gpt_academic)[Russian |](docs/README_RS.md)[French](docs/README_FR.md).
To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).**

> **Note**
>
> 1. Please note that only the function plugins (buttons) marked in **red** support reading files. Some plugins are located in the **drop-down menu** of the plugin area. In addition, we welcome and handle new plugin PRs with the **highest priority**!
>
> 2. The function of every file in this project is described in detail in [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As the version iterates, you can also click the related function plugin at any time and call GPT to regenerate the project's self-analysis report. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation instructions](#installation).
>
> 3. This project is compatible with, and encourages trying, domestic large language models such as chatglm, RWKV, Pangu, etc. Multiple api-keys are supported and can be written in the configuration file as `API_KEY="openai-key1,openai-key2,api2d-key3"`. When the `API_KEY` needs to be changed temporarily, enter the temporary `API_KEY` in the input area and press Enter to apply it immediately.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
기능 | 설명
|
||||||
|
--- | ---
|
||||||
|
원 키워드 | 원 키워드 및 논문 문법 오류를 찾는 기능 지원
|
||||||
|
한-영 키워드 | 한-영 키워드 지원
|
||||||
|
코드 설명 | 코드 표시, 코드 설명, 코드 생성, 코드에 주석 추가
|
||||||
|
[사용자 정의 바로 가기 키](https://www.bilibili.com/video/BV14s4y1E7jN) | 사용자 정의 바로 가기 키 지원
|
||||||
|
모듈식 설계 | 강력한[함수 플러그인](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions) 지원, 플러그인이 [램 업데이트](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)를 지원합니다.
|
||||||
|
[자체 프로그램 분석](https://www.bilibili.com/video/BV1cj411A7VW) | [함수 플러그인] [원 키 우드] 프로젝트 소스 코드의 내용을 이해하는 기능을 제공
|
||||||
|
[프로그램 분석](https://www.bilibili.com/video/BV1cj411A7VW) | [함수 플러그인] 프로젝트 트리를 분석할 수 있습니다 (Python/C/C++/Java/Lua/...)
|
||||||
|
논문 읽기, 번역 | [함수 플러그인] LaTex/PDF 논문의 전문을 읽고 요약을 생성합니다.
|
||||||
|
LaTeX 텍스트[번역](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [원 키워드](https://www.bilibili.com/video/BV1FT411H7c5/) | [함수 플러그인] LaTeX 논문의 번역 또는 개량을 위해 일련의 모드를 번역할 수 있습니다.
|
||||||
|
대량의 주석 생성 | [함수 플러그인] 함수 코멘트를 대량으로 생성할 수 있습니다.
|
||||||
|
Markdown 한-영 번역 | [함수 플러그인] 위의 5 종 언어의 [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)를 볼 수 있습니다.
|
||||||
|
chat 분석 보고서 생성 | [함수 플러그인] 수행 후 요약 보고서를 자동으로 생성합니다.
|
||||||
|
[PDF 논문 번역](https://www.bilibili.com/video/BV1KT411x7Wn) | [함수 플러그인] PDF 논문이 제목 및 요약을 추출한 후 번역됩니다. (멀티 스레드)
|
||||||
|
[Arxiv 도우미](https://www.bilibili.com/video/BV1LM4y1279X) | [함수 플러그인] Arxiv 논문 URL을 입력하면 요약을 번역하고 PDF를 다운로드 할 수 있습니다.
|
||||||
|
[Google Scholar 통합 도우미](https://www.bilibili.com/video/BV19L411U7ia) | [함수 플러그인] Google Scholar 검색 페이지 URL을 제공하면 gpt가 [Related Works 작성](https://www.bilibili.com/video/BV1GP411U7Az/)을 도와줍니다.
|
||||||
|
인터넷 정보 집계+GPT | [함수 플러그인] 먼저 GPT가 인터넷에서 정보를 수집하고 질문에 대답 할 수 있도록합니다. 정보가 절대적으로 구식이 아닙니다.
|
||||||
|
수식/이미지/표 표시 | 급여, 코드 강조 기능 지원
|
||||||
|
멀티 스레드 함수 플러그인 지원 | Chatgpt를 여러 요청에서 실행하여 [대량의 텍스트](https://www.bilibili.com/video/BV1FT411H7c5/) 또는 프로그램을 처리 할 수 있습니다.
|
||||||
|
다크 그라디오 테마 시작 | 어둡게 주제를 변경하려면 브라우저 URL 끝에 ```/?__theme=dark```을 추가하면됩니다.
|
||||||
|
[다중 LLM 모델](https://www.bilibili.com/video/BV1wT411p7yf) 지원, [API2D](https://api2d.com/) 인터페이스 지원됨 | GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS)가 모두 동시에 작동하는 것처럼 느낄 수 있습니다!
|
||||||
|
LLM 모델 추가 및[huggingface 배치](https://huggingface.co/spaces/qingxu98/gpt-academic) 지원 | 새 Bing 인터페이스 (새 Bing) 추가, Clearing House [Jittorllms](https://github.com/Jittor/JittorLLMs) 지원 [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) 및 [盘古α](https://openi.org.cn/pangu/)
|
||||||
|
기타 새로운 기능 (이미지 생성 등) ... | 이 문서의 끝부분을 참조하세요. ...- 모든 버튼은 functional.py를 동적으로 읽어와서 사용자 정의 기능을 자유롭게 추가할 수 있으며, 클립 보드를 해제합니다.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- 검수/오타 교정
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- 출력에 수식이 포함되어 있으면 텍스와 렌더링의 형태로 동시에 표시되어 복사 및 읽기가 용이합니다.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- 프로젝트 코드를 볼 시간이 없습니까? 전체 프로젝트를 chatgpt에 직접 표시하십시오
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- 다양한 대형 언어 모델 범용 요청 (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
---
|
||||||
|
# 설치
|
||||||
|
## Installation-Method 1: Run directly (Windows, Linux or MacOS)
|
||||||
|
|
||||||
|
1. 프로젝트 다운로드
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git
|
||||||
|
cd gpt_academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Configure the API_KEY

Configure the API KEY and other settings in `config.py`; see also [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).

(P.S. When the program runs, it first checks whether a private configuration file named `config_private.py` exists and uses its settings to override the same-named settings in `config.py`. Therefore, if you understand this configuration-reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) the settings from `config.py` into `config_private.py`. `config_private.py` is not tracked by git, which keeps your private information safer. P.S. The project also supports configuring most options through environment variables; see the `docker-compose` file for the environment-variable format. Reading priority: environment variables > `config_private.py` > `config.py`.)
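
The override order described above can be pictured with the sketch below; it only illustrates the priority rule (environment variables > `config_private.py` > `config.py`) and is not the project's actual loader code:

```python
# Illustrative sketch of the configuration priority, not the project's real loader.
import importlib
import os

def read_setting(name: str, default=None):
    """Return a setting, preferring environment variables, then config_private.py, then config.py."""
    if name in os.environ:                       # 1) an environment variable wins
        return os.environ[name]
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):               # 2) then config_private.py
            return getattr(private, name)
    except ImportError:
        pass
    public = importlib.import_module("config")   # 3) finally config.py
    return getattr(public, name, default)

# Example: read_setting("API_KEY")
```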
3. 의존성 설치
|
||||||
|
```sh
|
||||||
|
# (I 선택: 기존 python 경험이 있다면) (python 버전 3.9 이상, 최신 버전이 좋습니다), 참고: 공식 pip 소스 또는 알리 pip 소스 사용, 일시적인 교체 방법: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||||
|
python -m pip install -r requirements.txt
|
||||||
|
|
||||||
|
# (II 선택: Python에 익숙하지 않은 경우) anaconda 사용 방법은 비슷함(https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||||
|
conda create -n gptac_venv python=3.11 # anaconda 환경 만들기
|
||||||
|
conda activate gptac_venv # anaconda 환경 활성화
|
||||||
|
python -m pip install -r requirements.txt # 이 단계도 pip install의 단계와 동일합니다.
|
||||||
|
```
|
||||||
|
|
||||||
|
<details><summary>추가지원을 위해 Tsinghua ChatGLM / Fudan MOSS를 사용해야하는 경우 지원을 클릭하여 이 부분을 확장하세요.</summary>
|
||||||
|
<p>
|
||||||
|
|
||||||
|
[Tsinghua ChatGLM] / [Fudan MOSS]를 백엔드로 사용하려면 추가적인 종속성을 설치해야합니다 (전제 조건 : Python을 이해하고 Pytorch를 사용한 적이 있으며, 컴퓨터가 충분히 강력한 경우) :
|
||||||
|
```sh
|
||||||
|
# [선택 사항 I] Tsinghua ChatGLM을 지원합니다. Tsinghua ChatGLM에 대한 참고사항 : "Call ChatGLM fail cannot load ChatGLM parameters normally" 오류 발생시 다음 참조:
|
||||||
|
# 1 : 기본 설치된 것들은 torch + cpu 버전입니다. cuda를 사용하려면 torch를 제거한 다음 torch + cuda를 다시 설치해야합니다.
|
||||||
|
# 2 : 모델을 로드할 수 없는 기계 구성 때문에, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)를
|
||||||
|
# AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)로 변경합니다.
|
||||||
|
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||||
|
|
||||||
|
# [선택 사항 II] Fudan MOSS 지원
|
||||||
|
python -m pip install -r request_llm/requirements_moss.txt
|
||||||
|
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # 다음 코드 줄을 실행할 때 프로젝트 루트 경로에 있어야합니다.
|
||||||
|
|
||||||
|
# [선택 사항III] AVAIL_LLM_MODELS config.py 구성 파일에 기대하는 모델이 포함되어 있는지 확인하십시오.
|
||||||
|
# 현재 지원되는 전체 모델 :
|
||||||
|
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||||
|
```
|
||||||
|
|
||||||
|
</p>
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
4. 실행
|
||||||
|
```sh
|
||||||
|
python main.py
|
||||||
|
```

5. Test the function plugins

```
- The test function plugin template (it asks GPT what happened in history on this day) can be used as a template for implementing more complex functions.
Click "[Function plugin template demo] Today in history".
```
|
||||||
|
|
||||||
|
## 설치 - 방법 2 : 도커 사용
|
||||||
|
|
||||||
|
1. ChatGPT 만 (대부분의 사람들이 선택하는 것을 권장합니다.)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git # 다운로드
|
||||||
|
cd gpt_academic # 경로 이동
|
||||||
|
nano config.py # 아무 텍스트 에디터로 config.py를 열고 "Proxy","API_KEY","WEB_PORT" (예 : 50923) 등을 구성합니다.
|
||||||
|
docker build -t gpt-academic . # 설치
|
||||||
|
|
||||||
|
#(마지막 단계-1 선택) Linux 환경에서는 --net=host를 사용하면 더 편리합니다.
|
||||||
|
docker run --rm -it --net=host gpt-academic
|
||||||
|
#(마지막 단계-2 선택) macOS / windows 환경에서는 -p 옵션을 사용하여 컨테이너의 포트 (예 : 50923)를 호스트의 포트로 노출해야합니다.
|
||||||
|
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. ChatGPT + ChatGLM + MOSS (Docker에 익숙해야합니다.)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
#docker-compose.yml을 수정하여 계획 1 및 계획 3을 삭제하고 계획 2를 유지합니다. docker-compose.yml에서 계획 2의 구성을 수정하면 됩니다. 주석을 참조하십시오.
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
3. ChatGPT + LLAMA + Pangu + RWKV (Docker에 익숙해야합니다.)
|
||||||
|
``` sh
|
||||||
|
#docker-compose.yml을 수정하여 계획 1 및 계획 2을 삭제하고 계획 3을 유지합니다. docker-compose.yml에서 계획 3의 구성을 수정하면 됩니다. 주석을 참조하십시오.
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## 설치 - 방법 3 : 다른 배치 방법
|
||||||
|
|
||||||
|
1. 리버스 프록시 URL / Microsoft Azure API 사용 방법
|
||||||
|
API_URL_REDIRECT를 `config.py`에 따라 구성하면됩니다.
|
||||||
|
|
||||||
|
2. 원격 클라우드 서버 배치 (클라우드 서버 지식과 경험이 필요합니다.)
|
||||||
|
[배치위키-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)에 방문하십시오.
|
||||||
|
|
||||||
|
3. WSL2 사용 (Windows Subsystem for Linux 하위 시스템)
|
||||||
|
[배치 위키-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)에 방문하십시오.
|
||||||
|
|
||||||
|
4. How to run under a secondary URL (e.g. `http://localhost/subpath`)
|
||||||
|
See the [FastAPI run instructions](docs/WithFastapi.md).
|
||||||
|
|
||||||
|
5. docker-compose 실행
|
||||||
|
docker-compose.yml을 읽은 후 지시 사항에 따라 작업하십시오.
|
||||||
|
---
|
||||||
|
# 고급 사용법
|
||||||
|
## 사용자 정의 바로 가기 버튼 / 사용자 정의 함수 플러그인
|
||||||
|
|
||||||
|
1. 사용자 정의 바로 가기 버튼 (학술 바로 가기)
|
||||||
|
임의의 텍스트 편집기로 'core_functional.py'를 엽니다. 엔트리 추가, 그런 다음 프로그램을 다시 시작하면됩니다. (버튼이 이미 추가되어 보이고 접두사, 접미사가 모두 변수가 효과적으로 수정되면 프로그램을 다시 시작하지 않아도됩니다.)
|
||||||
|
예 :
|
||||||
|
```
|
||||||
|
"超级英译中": {
|
||||||
|
# 접두사. 당신이 요구하는 것을 설명하는 데 사용됩니다. 예를 들어 번역, 코드를 설명, 다듬기 등
|
||||||
|
"Prefix": "下面翻译成中文,然后用一个 markdown 表格逐一解释文中出现的专有名词:\n\n",
|
||||||
|
|
||||||
|
# 접미사는 입력 내용 앞뒤에 추가됩니다. 예를 들어 전위를 사용하여 입력 내용을 따옴표로 묶는데 사용할 수 있습니다.
|
||||||
|
"Suffix": "",
|
||||||
|
},
|
||||||
|
```
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
2. 사용자 지정 함수 플러그인
|
||||||
|
강력한 함수 플러그인을 작성하여 원하는 작업을 수행하십시오.
|
||||||
|
Writing and debugging plugins in this project has a very low barrier to entry: with some basic Python knowledge, you can implement your own plugin functions by imitating the template we provide. For details, see the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
|
||||||
|
---
|
||||||
|
# 최신 업데이트
|
||||||
|
## New feature highlights
|
||||||
|
|
||||||
|
1. Conversation saving. In the function plugin area, call "Save the current conversation" to save the current conversation as a readable and restorable HTML file. In addition, calling "Load conversation history archive" in the function plugin area (drop-down menu) restores a previous conversation. Tip: clicking "Load conversation history archive" without specifying a file lets you browse the cached HTML archives, and clicking "Delete all local conversation history records" deletes all HTML caches.
|
||||||
|
|
||||||
|
2. 보고서 생성. 대부분의 플러그인은 실행이 끝난 후 작업 보고서를 생성합니다.
|
||||||
|
|
||||||
|
3. 모듈화 기능 설계, 간단한 인터페이스로도 강력한 기능을 지원할 수 있습니다.
|
||||||
|
|
||||||
|
4. 자체 번역이 가능한 오픈 소스 프로젝트입니다.
|
||||||
|
|
||||||
|
5. 다른 오픈 소스 프로젝트를 번역하는 것은 어렵지 않습니다.
|
||||||
|
|
||||||
|
6. [live2d](https://github.com/fghrsh/live2d_demo) 장식 기능(기본적으로 비활성화되어 있으며 `config.py`를 수정해야 합니다.)
|
||||||
|
|
||||||
|
7. MOSS 대 언어 모델 지원 추가
|
||||||
|
|
||||||
|
8. OpenAI 이미지 생성
|
||||||
|
|
||||||
|
9. OpenAI 음성 분석 및 요약
|
||||||
|
|
||||||
|
10. LaTeX 전체적인 교정 및 오류 수정
|
||||||
|
|
||||||
|
## 버전:
|
||||||
|
- version 3.5 (TODO): 자연어를 사용하여 이 프로젝트의 모든 함수 플러그인을 호출하는 기능(우선순위 높음)
|
||||||
|
- version 3.4(TODO): 로컬 대 모듈의 다중 스레드 지원 향상
|
||||||
|
- version 3.3: 인터넷 정보 종합 기능 추가
|
||||||
|
- version 3.2: 함수 플러그인이 더 많은 인수 인터페이스를 지원합니다.(대화 저장 기능, 임의의 언어 코드 해석 및 동시에 임의의 LLM 조합을 확인하는 기능)
|
||||||
|
- version 3.1: 여러 개의 GPT 모델에 대한 동시 쿼리 지원! api2d 지원, 여러 개의 apikey 로드 밸런싱 지원
|
||||||
|
- version 3.0: chatglm 및 기타 소형 llm의 지원
|
||||||
|
- version 2.6: 플러그인 구조를 재구성하여 상호 작용성을 향상시켰습니다. 더 많은 플러그인을 추가했습니다.
|
||||||
|
- version 2.5: 자체 업데이트, 전체 프로젝트를 요약할 때 텍스트가 너무 길어지고 토큰이 오버플로우되는 문제를 해결했습니다.
|
||||||
|
- version 2.4: (1) PDF 전체 번역 기능 추가; (2) 입력 영역 위치 전환 기능 추가; (3) 수직 레이아웃 옵션 추가; (4) 다중 스레드 함수 플러그인 최적화.
|
||||||
|
- version 2.3: 다중 스레드 상호 작용성 강화
|
||||||
|
- version 2.2: 함수 플러그인 히트 리로드 지원
|
||||||
|
- version 2.1: 접는 레이아웃 지원
|
||||||
|
- version 2.0: 모듈화 함수 플러그인 도입
|
||||||
|
- version 1.0: 기본 기능
|
||||||
|
|
||||||
|
gpt_academic 개발자 QQ 그룹-2 : 610599535
|
||||||
|
|
||||||
|
- 알려진 문제
|
||||||
|
- 일부 브라우저 번역 플러그인이이 소프트웨어의 프론트 엔드 작동 방식을 방해합니다.
|
||||||
|
- gradio 버전이 너무 높거나 낮으면 여러 가지 이상이 발생할 수 있습니다.
|
||||||
|
|
||||||
|
## 참고 및 학습 자료
|
||||||
|
|
||||||
|
```
|
||||||
|
많은 우수 프로젝트의 디자인을 참고했습니다. 주요 항목은 다음과 같습니다.
|
||||||
|
|
||||||
|
# 프로젝트 1 : Tsinghua ChatGLM-6B :
|
||||||
|
https://github.com/THUDM/ChatGLM-6B
|
||||||
|
|
||||||
|
# 프로젝트 2 : Tsinghua JittorLLMs:
|
||||||
|
https://github.com/Jittor/JittorLLMs
|
||||||
|
|
||||||
|
# 프로젝트 3 : Edge-GPT :
|
||||||
|
https://github.com/acheong08/EdgeGPT
|
||||||
|
|
||||||
|
# 프로젝트 4 : ChuanhuChatGPT:
|
||||||
|
https://github.com/GaiZhenbiao/ChuanhuChatGPT
|
||||||
|
|
||||||
|
# 프로젝트 5 : ChatPaper :
|
||||||
|
https://github.com/kaixindelele/ChatPaper
|
||||||
|
|
||||||
|
# 더 많은 :
|
||||||
|
https://github.com/gradio-app/gradio
|
||||||
|
https://github.com/fghrsh/live2d_demo
|
||||||
|
```
|

324
docs/README.md.Portuguese.md
regular file
@@ -0,0 +1,324 @@
|
> **Nota**
|
||||||
|
>
|
||||||
|
> Ao instalar as dependências, por favor, selecione rigorosamente as versões **especificadas** no arquivo requirements.txt.
|
||||||
|
>
|
||||||
|
> `pip install -r requirements.txt`
|
||||||
|
>
|
||||||
|
|
||||||
|
# <img src="logo.png" width="40" > Otimização acadêmica GPT (GPT Academic)
|
||||||
|
|
||||||
|
**Se você gostou deste projeto, por favor dê um Star. Se você criou atalhos acadêmicos mais úteis ou plugins funcionais, sinta-se livre para abrir uma issue ou pull request. Nós também temos um README em [Inglês|](README_EN.md)[日本語|](README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](README_RS.md)[Français](README_FR.md) traduzidos por este próprio projeto.
|
||||||
|
Para traduzir este projeto para qualquer idioma com o GPT, leia e execute [`multi_language.py`](multi_language.py) (experimental).
|
||||||
|
|
||||||
|
> **Nota**
|
||||||
|
>
|
||||||
|
> 1. Por favor, preste atenção que somente os plugins de funções (botões) com a cor **vermelha** podem ler arquivos. Alguns plugins estão localizados no **menu suspenso** na área de plugins. Além disso, nós damos as boas-vindas com a **maior prioridade** e gerenciamos quaisquer novos plugins PR!
|
||||||
|
>
|
||||||
|
> 2. As funções de cada arquivo neste projeto são detalhadas em [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A), auto-análises do projeto geradas pelo GPT também estão podem ser chamadas a qualquer momento ao clicar nos plugins relacionados. As perguntas frequentes estão resumidas no [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Instruções de Instalação](#installation).
|
||||||
|
>
|
||||||
|
> 3. Este projeto é compatível com e incentiva o uso de modelos de linguagem nacionais, como chatglm e RWKV, Pangolin, etc. Suporta a coexistência de várias chaves de API e pode ser preenchido no arquivo de configuração como `API_KEY="openai-key1,openai-key2,api2d-key3"`. Quando precisar alterar temporariamente o `API_KEY`, basta digitar o `API_KEY` temporário na área de entrada e pressionar Enter para que ele entre em vigor.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
Funcionalidade | Descrição
|
||||||
|
--- | ---
|
||||||
|
Um clique de polimento | Suporte a um clique polimento, um clique encontrar erros de gramática no artigo
|
||||||
|
Tradução chinês-inglês de um clique | Tradução chinês-inglês de um clique
|
||||||
|
Explicação de código de um único clique | Exibir código, explicar código, gerar código, adicionar comentários ao código
|
||||||
|
[Teclas de atalho personalizadas](https://www.bilibili.com/video/BV14s4y1E7jN) | Suporte a atalhos personalizados
|
||||||
|
Projeto modular | Suporte para poderosos plugins[de função personalizada](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions), os plugins suportam[hot-reload](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
|
||||||
|
[Análise automática do programa](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin de função][um clique para entender](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) o código-fonte do projeto
|
||||||
|
[Análise do programa](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin de função] Um clique pode analisar a árvore de projetos do Python/C/C++/Java/Lua/...
|
||||||
|
Leitura de artigos, [tradução](https://www.bilibili.com/video/BV1KT411x7Wn) de artigos | [Plugin de função] um clique para interpretar o resumo de artigos LaTeX/PDF e gerar resumo
|
||||||
|
Tradução completa LATEX, polimento|[Plugin de função] Uma clique para traduzir ou polir um artigo LATEX
|
||||||
|
Geração em lote de comentários | [Plugin de função] Um clique gera comentários de função em lote
|
||||||
|
[Tradução chinês-inglês](https://www.bilibili.com/video/BV1yo4y157jV/) markdown | [Plugin de função] Você viu o README em 5 linguagens acima?
|
||||||
|
Relatório de análise de chat | [Plugin de função] Gera automaticamente um resumo após a execução
|
||||||
|
[Funcionalidade de tradução de artigos completos em PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin de função] Extrai o título e o resumo do artigo PDF e traduz o artigo completo (multithread)
|
||||||
|
Assistente arXiv | [Plugin de função] Insira o url do artigo arXiv para traduzir o resumo + baixar PDF
|
||||||
|
Assistente de integração acadêmica do Google | [Plugin de função] Dê qualquer URL de página de pesquisa acadêmica do Google e deixe o GPT escrever[trabalhos relacionados](https://www.bilibili.com/video/BV1GP411U7Az/)
|
||||||
|
Agregação de informações da Internet + GPT | [Plugin de função] Um clique para obter informações do GPT através da Internet e depois responde a perguntas para informações nunca ficarem desatualizadas
|
||||||
|
Exibição de fórmulas/imagem/tabela | Pode exibir simultaneamente a forma de renderização e[TEX] das fórmulas, suporte a fórmulas e realce de código
|
||||||
|
Suporte de plugins de várias linhas | Suporte a várias chamadas em linha do chatgpt, um clique para processamento[de massa de texto](https://www.bilibili.com/video/BV1FT411H7c5/) ou programa
|
||||||
|
Tema gradio escuro | Adicione ``` /?__theme=dark``` ao final da url do navegador para ativar o tema escuro
|
||||||
|
[Suporte para vários modelos LLM](https://www.bilibili.com/video/BV1wT411p7yf), suporte para a nova interface API2D | A sensação de ser atendido simultaneamente por GPT3.5, GPT4, [Chatglm THU](https://github.com/THUDM/ChatGLM-6B), [Moss Fudan](https://github.com/OpenLMLab/MOSS) deve ser ótima, certo?
|
||||||
|
Mais modelos LLM incorporados, suporte para a implantação[huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Adicione interface Newbing (New Bing), suporte [JittorLLMs](https://github.com/Jittor/JittorLLMs) THU Introdução ao suporte do LLaMA, RWKV e Pan Gu Alpha
|
||||||
|
Mais recursos novos mostrados (geração de imagens, etc.) ... | Consulte o final deste documento ...
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Nova interface (Modifique a opção LAYOUT em `config.py` para alternar entre o layout esquerdo/direito e o layout superior/inferior)
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||||
|
</div>

- All buttons are dynamically generated by reading functional.py, and you can add custom functions at will, liberating the clipboard
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src = "https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Proofreading / error correction
|
||||||
|
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src = "https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- If the output contains formulas, it will be displayed in both tex and rendering format at the same time, which is convenient for copying and reading
|
||||||
|
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src = "https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Don't want to read the project code? Just show the whole project to chatgpt
|
||||||
|
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src = "https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Mix the use of multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||||
|
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src = "https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
---
|
||||||
|
# Instalação
|
||||||
|
## Installation-Method 1: Run directly (Windows, Linux or MacOS)
|
||||||
|
|
||||||
|
1. Download the project
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git
|
||||||
|
cd gpt_academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Configure the API KEY
|
||||||
|
|
||||||
|
In `config.py`, configure the API KEY and other settings; see [Special Network Environment Settings](https://github.com/binary-husky/gpt_academic/issues/1).
|
||||||
|
|
||||||
|
(P.S. When the program runs, it will first check whether there is a private configuration file named `config_private.py`, and use the configuration in it to cover the configuration with the same name in `config.py`. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py`, and transfer (copy) the configuration in `config.py` to `config_private.py`. `config_private.py` is not controlled by git and can make your privacy information more secure. P.S. The project also supports configuring most options through `environment variables`. The writing format of environment variables is referenced to the `docker-compose` file. Reading priority: `environment variable` > `config_private.py` > `config.py`)
|
||||||
|
|
||||||
|
|
||||||
|
3. Install dependencies
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# (Option I: for those familiar with python)(python version is 3.9 or above, the newer the better), note: use the official pip source or the Alibaba pip source. Temporary solution for changing source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||||
|
python -m pip install -r requirements.txt
|
||||||
|
|
||||||
|
# (Option II: for those who are unfamiliar with python) use anaconda, the steps are also similar (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||||
|
conda create -n gptac_venv python=3.11 # create anaconda environment
|
||||||
|
conda activate gptac_venv # activate anaconda environment
|
||||||
|
python -m pip install -r requirements.txt # This step is the same as the pip installation step
|
||||||
|
```
|
||||||
|
|
||||||
|
<details><summary>If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, click to expand here</summary>
|
||||||
|
<p>
|
||||||
|
|
||||||
|
[Optional Step] If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, you need to install more dependencies (prerequisite: familiar with Python + used Pytorch + computer configuration is strong):
|
||||||
|
```sh
|
||||||
|
# 【Optional Step I】support Tsinghua ChatGLM。Tsinghua ChatGLM Note: If you encounter a "Call ChatGLM fails cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installed is torch+cpu version, and using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||||
|
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||||
|
|
||||||
|
# 【Optional Step II】support Fudan MOSS
|
||||||
|
python -m pip install -r request_llm/requirements_moss.txt
|
||||||
|
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When executing this line of code, you must be in the project root path
|
||||||
|
|
||||||
|
# 【Optional Step III】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports docker solutions):
|
||||||
|
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||||
|
```
|
||||||
|
|
||||||
|
</p>
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
|
4. Run
|
||||||
|
|
||||||
|
```sh
|
||||||
|
python main.py
|
||||||
|
```

5. Test the function plugins

```
- Test function plugin template (it asks GPT to answer what happened in history on this day); you can use this function as a template to implement more complex functions.
Click "[Demo function plugin template] Today in history"
```
|
||||||
|
|
||||||
|
## Instalação - Método 2: Usando o Docker
|
||||||
|
|
||||||
|
1. Apenas ChatGPT (recomendado para a maioria das pessoas)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git # Baixar o projeto
|
||||||
|
cd gpt_academic # Entrar no caminho
|
||||||
|
nano config.py # Editar config.py com qualquer editor de texto configurando "Proxy", "API_KEY" e "WEB_PORT" (por exemplo, 50923), etc.
|
||||||
|
docker build -t gpt-academic . # Instale
|
||||||
|
|
||||||
|
# (Ùltima etapa - escolha 1) Dentro do ambiente Linux, é mais fácil e rápido usar `--net=host`
|
||||||
|
docker run --rm -it --net=host gpt-academic
|
||||||
|
# (Última etapa - escolha 2) Em ambientes macOS/windows, você só pode usar a opção -p para expor a porta do contêiner (por exemplo, 50923) para a porta no host
|
||||||
|
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. ChatGPT + ChatGLM + MOSS (conhecimento de Docker necessário)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
# Edite o arquivo docker-compose.yml, remova as soluções 1 e 3, mantenha a solução 2, e siga as instruções nos comentários do arquivo
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
3. ChatGPT + LLAMA + Pangu + RWKV (conhecimento de Docker necessário)
|
||||||
|
``` sh
|
||||||
|
# Edite o arquivo docker-compose.yml, remova as soluções 1 e 2, mantenha a solução 3, e siga as instruções nos comentários do arquivo
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Instalação - Método 3: Outros Métodos de Implantação
|
||||||
|
|
||||||
|
1. Como usar URLs de proxy inverso/microsoft Azure API
|
||||||
|
Basta configurar o API_URL_REDIRECT de acordo com as instruções em `config.py`.
|
||||||
|
|
||||||
|
2. Implantação em servidores em nuvem remotos (requer conhecimento e experiência de servidores em nuvem)
|
||||||
|
Acesse [Wiki de implementação remota do servidor em nuvem](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||||
|
|
||||||
|
3. Usando a WSL2 (sub-sistema do Windows para Linux)
|
||||||
|
Acesse [Wiki da implantação da WSL2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||||
|
|
||||||
|
4. Como executar em um subdiretório (ex. `http://localhost/subpath`)
|
||||||
|
Acesse [Instruções de execução FastAPI](docs/WithFastapi.md)
|
||||||
|
|
||||||
|
5. Execute usando o docker-compose
|
||||||
|
Leia o arquivo docker-compose.yml e siga as instruções.
|
||||||
|
|
||||||
|
# Uso Avançado
|
||||||
|
## Customize novos botões de acesso rápido / plug-ins de função personalizados
|
||||||
|
|
||||||
|
1. Personalizar novos botões de acesso rápido (atalhos acadêmicos)
|
||||||
|
Abra `core_functional.py` em qualquer editor de texto e adicione os seguintes itens e reinicie o programa (Se o botão já foi adicionado e pode ser visto, prefixos e sufixos são compatíveis com modificações em tempo real e não exigem reinício do programa para ter efeito.)
|
||||||
|
Por exemplo,
|
||||||
|
```
|
||||||
|
"Super Eng:": {
|
||||||
|
# Prefixo, será adicionado antes da sua entrada. Por exemplo, para descrever sua solicitação, como tradução, explicação de código, polimento, etc.
|
||||||
|
"Prefix": "Por favor, traduza o seguinte conteúdo para chinês e use uma tabela em Markdown para explicar termos próprios no texto: \n \n",
|
||||||
|
|
||||||
|
# Sufixo, será adicionado após a sua entrada. Por exemplo, emparelhado com o prefixo, pode colocar sua entrada entre aspas.
|
||||||
|
"Suffix": "",
|
||||||
|
},
|
||||||
|
```
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
2. Personalizar plug-ins de função
|
||||||
|
|
||||||
|
Escreva plug-ins de função poderosos para executar tarefas que você deseja e não pensava possível.
|
||||||
|
A dificuldade geral de escrever e depurar plug-ins neste projeto é baixa e, se você tem algum conhecimento básico de python, pode implementar suas próprias funções sobre o modelo que fornecemos.
|
||||||
|
Para mais detalhes, consulte o [Guia do plug-in de função.](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
|
||||||
|
|
||||||
|
---
|
||||||
|
# Última atualização
|
||||||
|
## Novas funções dinâmicas.
|
||||||
|
|
||||||
|
1. Função de salvamento de diálogo. Ao chamar o plug-in de função "Salvar diálogo atual", é possível salvar o diálogo atual em um arquivo html legível e reversível. Além disso, ao chamar o plug-in de função "Carregar arquivo de histórico de diálogo" no menu suspenso da área de plug-in, é possível restaurar uma conversa anterior. Dica: clicar em "Carregar arquivo de histórico de diálogo" sem especificar um arquivo permite visualizar o cache do arquivo html de histórico. Clicar em "Excluir todo o registro de histórico de diálogo local" permite excluir todo o cache de arquivo html.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
2. Geração de relatório. A maioria dos plug-ins gera um relatório de trabalho após a conclusão da execução.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
3. Design modular de funcionalidades, com interfaces simples, mas suporte a recursos poderosos
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
4. Este é um projeto de código aberto que é capaz de "auto-traduzir-se".
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
5. A tradução de outros projetos de código aberto é simples.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
6. Recursos decorativos para o [live2d](https://github.com/fghrsh/live2d_demo) (desativados por padrão, é necessário modificar o arquivo `config.py`)
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
7. Suporte ao modelo de linguagem MOSS
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
8. Geração de imagens pelo OpenAI
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
9. Análise e resumo de áudio pelo OpenAI
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
10. Revisão e correção de erros de texto em Latex.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
## Versão:
|
||||||
|
- Versão 3.5(Todo): Usar linguagem natural para chamar todas as funções do projeto (prioridade alta)
|
||||||
|
- Versão 3.4(Todo): Melhorar o suporte à multithread para o chatglm local
|
||||||
|
- Versão 3.3: +Funções integradas de internet
|
||||||
|
- Versão 3.2: Suporte a mais interfaces de parâmetros de plug-in (função de salvar diálogo, interpretação de códigos de várias linguagens, perguntas de combinações LLM arbitrárias ao mesmo tempo)
|
||||||
|
- Versão 3.1: Suporte a perguntas a vários modelos de gpt simultaneamente! Suporte para api2d e balanceamento de carga para várias chaves api
|
||||||
|
- Versão 3.0: Suporte ao chatglm e outros LLMs de pequeno porte
|
||||||
|
- Versão 2.6: Refatoração da estrutura de plug-in, melhoria da interatividade e adição de mais plug-ins
|
||||||
|
- Versão 2.5: Autoatualização, resolvendo problemas de token de texto excessivamente longo e estouro ao compilar grandes projetos
|
||||||
|
- Versão 2.4: (1) Adição de funcionalidade de tradução de texto completo em PDF; (2) Adição de funcionalidade de mudança de posição da área de entrada; (3) Adição de opção de layout vertical; (4) Otimização de plug-ins de multithread.
|
||||||
|
- Versão 2.3: Melhoria da interatividade de multithread
|
||||||
|
- Versão 2.2: Suporte à recarga a quente de plug-ins
|
||||||
|
- Versão 2.1: Layout dobrável
|
||||||
|
- Versão 2.0: Introdução de plug-ins de função modular
|
||||||
|
- Version 1.0: basic features

gpt_academic developer QQ group 2: 610599535
|
||||||
|
|
||||||
|
- Problemas conhecidos
|
||||||
|
- Extensões de tradução de alguns navegadores podem interferir na execução do front-end deste software
|
||||||
|
- Uma versão muito alta ou muito baixa do Gradio pode causar vários erros
|
||||||
|
|
||||||
|
## Referências e Aprendizado
|
||||||
|
|
||||||
|
```
|
||||||
|
Foi feita referência a muitos projetos excelentes em código, principalmente:
|
||||||
|
|
||||||
|
# Projeto1: ChatGLM-6B da Tsinghua:
|
||||||
|
https://github.com/THUDM/ChatGLM-6B
|
||||||
|
|
||||||
|
# Projeto2: JittorLLMs da Tsinghua:
|
||||||
|
https://github.com/Jittor/JittorLLMs
|
||||||
|
|
||||||
|
# Projeto3: Edge-GPT:
|
||||||
|
https://github.com/acheong08/EdgeGPT
|
||||||
|
|
||||||
|
# Projeto4: ChuanhuChatGPT:
|
||||||
|
https://github.com/GaiZhenbiao/ChuanhuChatGPT
|
||||||
|
|
||||||
|
# Projeto5: ChatPaper:
|
||||||
|
https://github.com/kaixindelele/ChatPaper
|
||||||
|
|
||||||
|
# Mais:
|
||||||
|
https://github.com/gradio-app/gradio
|
||||||
|
https://github.com/fghrsh/live2d_demo
|
||||||
|
```
|

322
docs/README_EN.md
regular file
@@ -0,0 +1,322 @@
|
> **Note**
|
||||||
|
>
|
||||||
|
> This English README is automatically generated by the markdown translation plugin in this project, and may not be 100% correct.
|
||||||
|
>
|
||||||
|
> When installing dependencies, **please strictly select the versions** specified in requirements.txt.
|
||||||
|
>
|
||||||
|
> `pip install -r requirements.txt`
|
||||||
|
|
||||||
|
# GPT Academic Optimization (GPT Academic)
|
||||||
|
|
||||||
|
**If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request.
|
||||||
|
To translate this project to an arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).**
|
||||||
|
|
||||||
|
> Note:
|
||||||
|
>
|
||||||
|
> 1. Please note that only the function plugins (buttons) marked in **red** support reading files. Some plugins are in the **drop-down menu** in the plugin area. We welcome and process any new plugins with the **highest priority**!
|
||||||
|
> 2. The function of each file in this project is detailed in the self-translation analysis [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). With version iteration, you can also click on related function plugins at any time to call GPT to regenerate the project's self-analysis report. Common questions are summarized in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installation).
|
||||||
|
> 3. This project is compatible with and encourages trying domestic large language models such as chatglm, RWKV, Pangu, etc. Multiple API keys are supported and can be filled in the configuration file like `API_KEY="openai-key1,openai-key2,api2d-key3"`. When temporarily changing `API_KEY`, enter the temporary `API_KEY` in the input area and press enter to submit, which will take effect.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
Function | Description
|
||||||
|
--- | ---
|
||||||
|
One-click polishing | Supports one-click polishing and one-click searching for grammar errors in papers.
|
||||||
|
One-click Chinese-English translation | One-click Chinese-English translation.
|
||||||
|
One-click code interpretation | Displays, explains, generates, and adds comments to code.
|
||||||
|
[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys.
|
||||||
|
Modular design | Supports custom powerful [function plug-ins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions), plug-ins support [hot update](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
|
||||||
|
[Self-program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plug-in] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of the source code of this project
|
||||||
|
[Program profiling](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plug-in] One-click profiling of other project trees in Python/C/C++/Java/Lua/...
|
||||||
|
Reading papers, [translating](https://www.bilibili.com/video/BV1KT411x7Wn) papers | [Function Plug-in] One-click interpretation of latex/pdf full-text papers and generation of abstracts.
|
||||||
|
Latex full-text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/), [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plug-in] One-click translation or polishing of latex papers.
|
||||||
|
Batch annotation generation | [Function plug-in] One-click batch generation of function annotations.
|
||||||
|
Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plug-in] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
|
||||||
|
Chat analysis report generation | [Function plug-in] Automatically generate summary reports after running.
|
||||||
|
[PDF full-text translation function](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plug-in] PDF paper extract title & summary + translate full text (multi-threaded)
|
||||||
|
[Arxiv Assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plug-in] Enter the arxiv article url and you can translate abstracts and download PDFs with one click.
|
||||||
|
[Google Scholar Integration Assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plug-in] Given any Google Scholar search page URL, let GPT help you [write related works](https://www.bilibili.com/video/BV1GP411U7Az/)
|
||||||
|
Internet information aggregation+GPT | [Function plug-in] One-click [let GPT get information from the Internet first](https://www.bilibili.com/video/BV1om4y127ck), then answer questions, and let the information never be outdated.
|
||||||
|
Formula/image/table display | Can display formulas in both [tex form and render form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), support formulas and code highlighting.
|
||||||
|
Multi-threaded function plug-in support | Supports multi-threaded calling of chatgpt, and can process [massive text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs with one click.
|
||||||
|
Start Dark Gradio [theme](https://github.com/binary-husky/gpt_academic/issues/173) | Add ```/?__theme=dark``` after the browser URL to switch to the dark theme.
|
||||||
|
[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) interface support | The feeling of being served by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) at the same time must be great, right?
|
||||||
|
More LLM model access, support [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Add Newbing interface (New Bing), introduce Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) to support [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) and [Panguα](https://openi.org.cn/pangu/)
|
||||||
|
More new feature displays (image generation, etc.)…… | See the end of this document for more...
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- New interface (modify the LAYOUT option in `config.py` to switch between "left and right layout" and "up and down layout")
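
For example, the relevant line in `config.py` looks roughly like this (the exact option strings are an assumption here; check the comments in `config.py` itself):

```python
# In config.py -- switch the UI layout (value names are illustrative)
LAYOUT = "TOP-DOWN"   # or "LEFT-RIGHT" for the side-by-side layout
```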
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||||
|
</div>

- All buttons are dynamically generated by reading `functional.py`, and you can add custom functions freely, liberating the clipboard.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- polishing/correction
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- If the output contains formulas, they will be displayed in both `tex` and render form, making it easy to copy and read.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Tired of reading the project code? ChatGPT can explain it all.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Multiple large language models are mixed, such as ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
---
|
||||||
|
# Installation
|
||||||
|
## Method 1: Directly running (Windows, Linux or MacOS)
|
||||||
|
|
||||||
|
1. Download the project
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git
|
||||||
|
cd gpt_academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Configure the API_KEY
|
||||||
|
|
||||||
|
Configure the API KEY in `config.py`, [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
|
||||||
|
|
||||||
|
(P.S. When the program is running, it will first check if there is a private configuration file named `config_private.py` and use the configurations in it to override the same configurations in `config.py`. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and transfer (copy) the configurations in `config.py` to `config_private.py`. `config_private.py` is not controlled by git and can make your private information more secure. P.S. The project also supports configuring most options through `environment variables`. Please refer to the format of `docker-compose` file when writing. Reading priority: `environment variables` > `config_private.py` > `config.py`)
|
||||||
|
|
||||||
|
|
||||||
|
3. Install the dependencies
|
||||||
|
```sh
|
||||||
|
# (Option I: if you are familiar with Python; Python 3.9 or above, the newer the better). Note: use the official pip source or the Aliyun mirror; to switch temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||||
|
python -m pip install -r requirements.txt
|
||||||
|
|
||||||
|
# (Option II: if you are not familiar with Python) Use Anaconda; the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||||
|
conda create -n gptac_venv python=3.11 # create anaconda environment
|
||||||
|
conda activate gptac_venv # activate anaconda environment
|
||||||
|
python -m pip install -r requirements.txt # this step is the same as pip installation
|
||||||
|
```
|
||||||
|
|
||||||
|
<details><summary>If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, click to expand</summary>
|
||||||
|
<p>
|
||||||
|
|
||||||
|
[Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (prerequisites: familiar with Python + have used PyTorch + the computer configuration is strong enough):
|
||||||
|
```sh
|
||||||
|
# [Optional Step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: if you encounter the "Call ChatGLM fail cannot load ChatGLM parameters" error, refer to this: 1: The default installation above is torch + cpu version, to use cuda, you need to uninstall torch and reinstall torch + cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code = True)
|
||||||
|
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||||
|
|
||||||
|
# [Optional Step II] Support Fudan MOSS
|
||||||
|
python -m pip install -r request_llm/requirements_moss.txt
|
||||||
|
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the root directory of the project
|
||||||
|
|
||||||
|
# [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. Currently supported models are as follows (the jittorllms series only supports the docker solution for the time being):
|
||||||
|
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||||
|
```
|
||||||
|
|
||||||
|
</p>
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
4. Run it
|
||||||
|
```sh
|
||||||
|
python main.py
|
||||||
|
```

5. Test the function plugins
|
||||||
|
```
|
||||||
|
- Test the function plugin template (ask GPT what happened in history on this day); use it as a template to implement more complex functions
|
||||||
|
Click "[Function Plugin Template Demo] Today in History"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installation - Method 2: Using Docker
|
||||||
|
|
||||||
|
1. ChatGPT Only (Recommended for Most People)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git # Download project
|
||||||
|
cd gpt_academic # Enter path
|
||||||
|
nano config.py # Edit config.py with any text editor, configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923), etc.
|
||||||
|
docker build -t gpt-academic . # Install
|
||||||
|
|
||||||
|
#(Last step - option 1) In a Linux environment, use `--net=host` for convenience and speed.
|
||||||
|
docker run --rm -it --net=host gpt-academic
|
||||||
|
#(Last step - option 2) On macOS/Windows, only the -p option can be used to expose the container's port (e.g. 50923) to a port on the host.
|
||||||
|
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. ChatGPT + ChatGLM + MOSS (Requires Docker Knowledge)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
# Edit docker-compose.yml: delete Plan 1 and Plan 3, keep Plan 2, and configure Plan 2 following the comments in the file.
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
3. ChatGPT + LLAMA + Pangu + RWKV (Requires Docker Knowledge)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
# Edit docker-compose.yml: delete Plan 1 and Plan 2, keep Plan 3, and configure Plan 3 following the comments in the file.
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installation - Method 3: Other Deployment Options
|
||||||
|
|
||||||
|
1. How to use a reverse proxy URL / Microsoft Azure Cloud API
|
||||||
|
Configure API_URL_REDIRECT according to the instructions in `config.py`.
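For illustration, the redirect maps the official endpoint to your own gateway; the target URL below is a placeholder, and the exact keys to use are documented in the comments of `config.py`:

```python
# config.py -- redirect the official OpenAI endpoint to a reverse proxy or
# Azure-compatible gateway (placeholder URL; adapt it to your provider)
API_URL_REDIRECT = {
    "https://api.openai.com/v1/chat/completions":
        "https://your-reverse-proxy.example.com/v1/chat/completions",
}
```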
|
||||||
|
|
||||||
|
2. Deploy to a Remote Server (Requires Knowledge and Experience with Cloud Servers)
|
||||||
|
Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||||
|
|
||||||
|
3. Using WSL2 (Windows Subsystem for Linux)
|
||||||
|
Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||||
|
|
||||||
|
4. How to run under a URL subpath (e.g. `http://localhost/subpath`)
|
||||||
|
Please visit [FastAPI Running Instructions](docs/WithFastapi.md)
|
||||||
|
|
||||||
|
5. Using docker-compose to Run
|
||||||
|
Read the docker-compose.yml and follow the prompts.
|
||||||
|
|
||||||
|
---
|
||||||
|
# Advanced Usage
|
||||||
|
## Custom New Shortcut Buttons / Custom Function Plugins
|
||||||
|
|
||||||
|
1. Custom New Shortcut Buttons (Academic Hotkey)
|
||||||
|
Open `core_functional.py` with any text editor, add an entry as follows and restart the program. (If the button has been successfully added and is visible, the prefix and suffix can be hot-modified without having to restart the program.)
|
||||||
|
For example,
|
||||||
|
```
|
||||||
|
"Super English-to-Chinese": {
|
||||||
|
# Prefix, which will be added before your input. For example, used to describe your requests, such as translation, code explanation, polishing, etc.
|
||||||
|
"Prefix": "Please translate the following content into Chinese and then use a markdown table to explain the proprietary terms that appear in the text:\n\n",
|
||||||
|
|
||||||
|
# Suffix, which is added after your input. For example, with the prefix, your input content can be surrounded by quotes.
|
||||||
|
"Suffix": "",
|
||||||
|
},
|
||||||
|
```
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
2. Custom Function Plugins
|
||||||
|
|
||||||
|
Write powerful function plugins to perform any task you can think of, even those you cannot think of.
|
||||||
|
Writing and debugging plugins for this project is easy: with basic Python knowledge, you can implement your own plugin functions based on the template we provide (see the sketch after the guide link below).
|
||||||
|
For details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
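A minimal plugin sketch is given below, modeled on the project's template plugins; the helper names (`CatchException`, `update_ui`, `request_gpt_model_in_new_thread_with_ui_alive`) and the plugin signature reflect the version current at the time of writing and may differ in yours, so treat them as assumptions and compare against the bundled templates in `crazy_functions/`:

```python
# Sketch of a custom function plugin (assumed signature/helpers -- verify
# against the templates shipped in crazy_functions/ for your version).
from toolbox import CatchException, update_ui
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

@CatchException
def summarize_input_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Show the request in the chat window, then refresh the UI
    chatbot.append((txt, "Summarizing, please wait..."))
    yield from update_ui(chatbot=chatbot, history=history)

    # Ask the currently selected LLM and stream the answer back to the chat
    answer = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=f"Summarize the following text in three sentences:\n\n{txt}",
        inputs_show_user=txt,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history=[],
        sys_prompt="You are a concise academic assistant.",
    )
    history.extend([txt, answer])
    yield from update_ui(chatbot=chatbot, history=history)
```

To make the new function appear as a button, it also has to be registered in the plugin registry (`crazy_functional.py` in recent versions); the guide linked above walks through that step.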
|
||||||
|
|
||||||
|
---
|
||||||
|
# Latest Update
|
||||||
|
## New Feature Highlights
|
||||||
|
1. Conversation saving function. Call `Save current conversation` in the function plugin area to save the current conversation as a readable and recoverable HTML file. In addition, call `Load conversation history archive` in the function plugin area (dropdown menu) to restore previous sessions. Tip: Clicking `Load conversation history archive` without specifying a file will display the cached history of HTML archives, and clicking `Delete all local conversation history` will delete all HTML archive caches.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
2. Report generation. Most plugins will generate work reports after execution.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
3. Modular function design with simple interfaces that support powerful functions.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
4. This is an open-source project that can "self-translate".
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
5. Translating other open-source projects is a piece of cake.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
6. A small decorative feature using [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default; enabling it requires modifying `config.py`).
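A minimal sketch of the switch involved, assuming the option is named `ADD_WAIFU` as in recent versions of `config.py` (verify the exact key in your copy):

```python
# config.py -- enable the live2d decoration (assumed option name)
ADD_WAIFU = True
```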
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
7. Added MOSS large language model support.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
8. OpenAI image generation.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
9. OpenAI audio parsing and summarization.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
10. Full-text proofreading and error correction of LaTeX.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
## Versions:
|
||||||
|
- version 3.5 (todo): Use natural language to call all function plugins of this project (high priority).
|
||||||
|
- version 3.4 (todo): Improve multi-threading support for chatglm local large models.
|
||||||
|
- version 3.3: +Internet information integration function.
|
||||||
|
- version 3.2: Function plugin supports more parameter interfaces (save conversation function, interpretation of any language code + simultaneous inquiry of any LLM combination).
|
||||||
|
- version 3.1: Support simultaneous inquiry of multiple GPT models! Support api2d, and support load balancing of multiple apikeys.
|
||||||
|
- version 3.0: Support chatglm and other small LLM models.
|
||||||
|
- version 2.6: Refactored plugin structure, improved interactivity, and added more plugins.
|
||||||
|
- version 2.5: Self-updating, solving the problem of text overflow and token overflow when summarizing large engineering source codes.
|
||||||
|
- version 2.4: (1) Added PDF full-text translation function; (2) Added the function of switching the position of the input area; (3) Added vertical layout option; (4) Optimized multi-threading function plugins.
|
||||||
|
- version 2.3: Enhanced multi-threading interactivity.
|
||||||
|
- version 2.2: Function plugin supports hot reloading.
|
||||||
|
- version 2.1: Collapsible layout.
|
||||||
|
- version 2.0: Introduction of modular function plugins.
|
||||||
|
- version 1.0: Basic functions.
|
||||||
|
|
||||||
|
gpt_academic Developer QQ Group-2: 610599535
|
||||||
|
|
||||||
|
- Known Issues
|
||||||
|
- Some browser translation plugins interfere with the front-end operation of this software.
|
||||||
|
- Gradio versions that are too new or too old can both cause various exceptions.
|
||||||
|
|
||||||
|
## Reference and Learning
|
||||||
|
|
||||||
|
```
|
||||||
|
Many other excellent designs have been referenced in the code, mainly including:
|
||||||
|
|
||||||
|
# Project 1: THU ChatGLM-6B:
|
||||||
|
https://github.com/THUDM/ChatGLM-6B
|
||||||
|
|
||||||
|
# Project 2: THU JittorLLMs:
|
||||||
|
https://github.com/Jittor/JittorLLMs
|
||||||
|
|
||||||
|
# Project 3: Edge-GPT:
|
||||||
|
https://github.com/acheong08/EdgeGPT
|
||||||
|
|
||||||
|
# Project 4: ChuanhuChatGPT:
|
||||||
|
https://github.com/GaiZhenbiao/ChuanhuChatGPT
|
||||||
|
|
||||||
|
# Project 5: ChatPaper:
|
||||||
|
https://github.com/kaixindelele/ChatPaper
|
||||||
|
|
||||||
|
# More:
|
||||||
|
https://github.com/gradio-app/gradio
|
||||||
|
https://github.com/fghrsh/live2d_demo
|
||||||
|
```
|
||||||
docs/README_FR.md (regular file, 323 lines added)
@@ -0,0 +1,323 @@
|
|||||||
|
> **Note**
|
||||||
|
>
|
||||||
|
> Ce fichier README est généré automatiquement par le plugin de traduction markdown de ce projet et n'est peut-être pas correct à 100%.
|
||||||
|
>
|
||||||
|
> During installation, please strictly select the versions **specified** in requirements.txt.
|
||||||
|
>
|
||||||
|
> `pip install -r requirements.txt`
|
||||||
|
>
|
||||||
|
|
||||||
|
# <img src="logo.png" width="40" > Optimisation académique GPT (GPT Academic)
|
||||||
|
|
||||||
|
**Si vous aimez ce projet, veuillez lui donner une étoile. Si vous avez trouvé des raccourcis académiques ou des plugins fonctionnels plus utiles, n'hésitez pas à ouvrir une demande ou une pull request.
|
||||||
|
Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental).
|
||||||
|
|
||||||
|
> **Note**
|
||||||
|
>
|
||||||
|
> 1. Veuillez noter que seuls les plugins de fonctions (boutons) **en rouge** prennent en charge la lecture de fichiers. Certains plugins se trouvent dans le **menu déroulant** de la zone de plugins. De plus, nous accueillons et traitons les nouvelles pull requests pour les plugins avec **la plus haute priorité**!
|
||||||
|
>
|
||||||
|
> 2. Les fonctions de chaque fichier de ce projet sont expliquées en détail dans l'auto-analyse [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Avec l'itération des versions, vous pouvez également cliquer sur les plugins de fonctions pertinents et appeler GPT pour régénérer le rapport d'auto-analyse du projet à tout moment. Les FAQ sont résumées dans [le wiki](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Méthode d'installation](#installation).
|
||||||
|
>
|
||||||
|
> 3. Ce projet est compatible avec et encourage l'utilisation de grands modèles de langage nationaux tels que chatglm, RWKV, Pangu, etc. La coexistence de plusieurs clés API est prise en charge et peut être remplie dans le fichier de configuration, tel que `API_KEY="openai-key1,openai-key2,api2d-key3"`. Lorsque vous souhaitez remplacer temporairement `API_KEY`, saisissez temporairement `API_KEY` dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
Functionnalité | Description
|
||||||
|
--- | ---
|
||||||
|
Révision en un clic | prend en charge la révision en un clic et la recherche d'erreurs de syntaxe dans les articles
|
||||||
|
Traduction chinois-anglais en un clic | Traduction chinois-anglais en un clic
|
||||||
|
Explication de code en un clic | Affichage, explication, génération et ajout de commentaires de code
|
||||||
|
[Raccourcis personnalisés](https://www.bilibili.com/video/BV14s4y1E7jN) | prend en charge les raccourcis personnalisés
|
||||||
|
Conception modulaire | prend en charge de puissants plugins de fonction personnalisée, les plugins prennent en charge la [mise à jour à chaud](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
|
||||||
|
[Autoscanner](https://www.bilibili.com/video/BV1cj411A7VW) | [Plug-in de fonction] [Compréhension instantanée](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) du code source de ce projet
|
||||||
|
[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plug-in de fonction] Analyse en un clic de la structure d'autres projets Python / C / C ++ / Java / Lua / ...
|
||||||
|
Lecture d'articles, [traduction](https://www.bilibili.com/video/BV1KT411x7Wn) d'articles | [Plug-in de fonction] Compréhension instantanée de l'article latex / pdf complet et génération de résumés
|
||||||
|
[Traduction](https://www.bilibili.com/video/BV1nk4y1Y7Js/) et [révision](https://www.bilibili.com/video/BV1FT411H7c5/) complets en latex | [Plug-in de fonction] traduction ou révision en un clic d'articles en latex
|
||||||
|
Génération de commentaires en masse | [Plug-in de fonction] Génération en un clic de commentaires de fonction en masse
|
||||||
|
Traduction [chinois-anglais](https://www.bilibili.com/video/BV1yo4y157jV/) en Markdown | [Plug-in de fonction] avez-vous vu la [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) pour les 5 langues ci-dessus?
|
||||||
|
Génération de rapports d'analyse de chat | [Plug-in de fonction] Génère automatiquement un rapport de résumé après l'exécution
|
||||||
|
[Traduction intégrale en pdf](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plug-in de fonction] Extraction de titre et de résumé de l'article pdf + traduction intégrale (multi-thread)
|
||||||
|
[Aide à arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plug-in de fonction] Entrer l'url de l'article arxiv pour traduire et télécharger le résumé en un clic
|
||||||
|
[Aide à la recherche Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Plug-in de fonction] Donnez l'URL de la page de recherche Google Scholar, laissez GPT vous aider à [écrire des ouvrages connexes](https://www.bilibili.com/video/BV1GP411U7Az/)
|
||||||
|
Aggrégation d'informations en ligne et GPT | [Plug-in de fonction] Permet à GPT de [récupérer des informations en ligne](https://www.bilibili.com/video/BV1om4y127ck), puis de répondre aux questions, afin que les informations ne soient jamais obsolètes
|
||||||
|
Affichage d'équations / images / tableaux | Fournit un affichage simultané de [la forme tex et de la forme rendue](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), prend en charge les formules mathématiques et la coloration syntaxique du code
|
||||||
|
Prise en charge des plugins à plusieurs threads | prend en charge l'appel multithread de chatgpt, un clic pour traiter [un grand nombre d'articles](https://www.bilibili.com/video/BV1FT411H7c5/) ou de programmes
|
||||||
|
Thème gradio sombre en option de démarrage | Ajoutez```/?__theme=dark``` à la fin de l'URL du navigateur pour basculer vers le thème sombre
|
||||||
|
[Prise en charge de plusieurs modèles LLM](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) | Sera probablement très agréable d'être servi simultanément par GPT3.5, GPT4, [ChatGLM de Tsinghua](https://github.com/THUDM/ChatGLM-6B), [MOSS de Fudan](https://github.com/OpenLMLab/MOSS)
|
||||||
|
Plus de modèles LLM, déploiement de [huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Ajout prise en charge de l'interface Newbing (nouvelle bing), introduction du support de [Jittorllms de Tsinghua](https://github.com/Jittor/JittorLLMs), [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV) et [Panguα](https://openi.org.cn/pangu/)
|
||||||
|
Plus de nouvelles fonctionnalités (génération d'images, etc.) ... | Voir la fin de ce document pour plus de détails ...
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
- Nouvelle interface (modifier l'option LAYOUT de `config.py` pour passer d'une disposition ``gauche-droite`` à une disposition ``haut-bas``)
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||||
|
</div>

- Tous les boutons sont générés dynamiquement en lisant functional.py et peuvent être facilement personnalisés pour ajouter des fonctionnalités personnalisées, ce qui facilite l'utilisation du presse-papiers.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Correction d'erreurs/lissage du texte.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Si la sortie contient des équations, elles sont affichées à la fois sous forme de tex et sous forme rendue pour faciliter la lecture et la copie.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Pas envie de lire les codes de ce projet? Tout le projet est directement exposé par ChatGPT.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Appel à une variété de modèles de langage de grande envergure (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4).
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
---
|
||||||
|
# Installation
|
||||||
|
## Installation-Method 1: running directly (Windows, Linux or MacOS)
|
||||||
|
|
||||||
|
1. Télécharger le projet
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git
|
||||||
|
cd gpt_academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Configuration de la clé API
|
||||||
|
|
||||||
|
Dans `config.py`, configurez la clé API et d'autres paramètres. Consultez [Special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
|
||||||
|
|
||||||
|
(P.S. Lorsque le programme est exécuté, il vérifie en premier s'il existe un fichier de configuration privé nommé `config_private.py` et remplace les paramètres portant le même nom dans `config.py` par les paramètres correspondants dans `config_private.py`. Par conséquent, si vous comprenez la logique de lecture de nos configurations, nous vous recommandons vivement de créer un nouveau fichier de configuration nommé `config_private.py` à côté de `config.py` et de transférer (copier) les configurations de `config.py`. `config_private.py` n'est pas contrôlé par Git et peut garantir la sécurité de vos informations privées. P.S. Le projet prend également en charge la configuration de la plupart des options via "variables d'environnement", le format d'écriture des variables d'environnement est référencé dans le fichier `docker-compose`. Priorité de lecture: "variables d'environnement" > `config_private.py` > `config.py`)
|
||||||
|
|
||||||
|
|
||||||
|
3. Installer les dépendances
|
||||||
|
```sh
|
||||||
|
# (Option I: python users instalation) (Python version 3.9 or higher, the newer the better). Note: use official pip source or ali pip source. To temporarily change the source: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||||
|
python -m pip install -r requirements.txt
|
||||||
|
|
||||||
|
# (Option II: non-python users instalation) Use Anaconda, the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||||
|
conda create -n gptac_venv python=3.11 # Create anaconda env
|
||||||
|
conda activate gptac_venv # Activate anaconda env
|
||||||
|
python -m pip install -r requirements.txt # Same step as pip instalation
|
||||||
|
```
|
||||||
|
|
||||||
|
<details><summary>Cliquez ici pour afficher le texte si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend.</summary>
|
||||||
|
<p>
|
||||||
|
|
||||||
|
【Optional】 Si vous souhaitez prendre en charge THU ChatGLM/FDU MOSS en tant que backend, des dépendances supplémentaires doivent être installées (prérequis: compétent en Python + utilisez Pytorch + configuration suffisante de l'ordinateur):
|
||||||
|
```sh
|
||||||
|
# 【Optional Step I】 Support THU ChatGLM. Remarque sur THU ChatGLM: Si vous rencontrez l'erreur "Appel à ChatGLM échoué, les paramètres ChatGLM ne peuvent pas être chargés normalement", reportez-vous à ce qui suit: 1: La version par défaut installée est torch+cpu, si vous souhaitez utiliser cuda, vous devez désinstaller torch et réinstaller torch+cuda; 2: Si le modèle ne peut pas être chargé en raison d'une configuration insuffisante de l'ordinateur local, vous pouvez modifier la précision du modèle dans request_llm/bridge_chatglm.py, modifier AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) par AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
|
||||||
|
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||||
|
|
||||||
|
# 【Optional Step II】 Support FDU MOSS
|
||||||
|
python -m pip install -r request_llm/requirements_moss.txt
|
||||||
|
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When running this line of code, you must be in the project root path.
|
||||||
|
|
||||||
|
# 【Optional Step III】Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the desired model. Currently, all models supported are as follows (the jittorllms series currently only supports the docker scheme):
|
||||||
|
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||||
|
```
|
||||||
|
|
||||||
|
</p>
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
4. Exécution
|
||||||
|
```sh
|
||||||
|
python main.py
|
||||||
|
```

5. Plugin de fonction de test
|
||||||
|
```
|
||||||
|
- Fonction de modèle de plugin de test (requiert que GPT réponde à ce qui s'est passé dans l'histoire aujourd'hui), vous pouvez utiliser cette fonction comme modèle pour mettre en œuvre des fonctionnalités plus complexes.
|
||||||
|
Cliquez sur "[Démo de modèle de plugin de fonction] Aujourd'hui dans l'histoire"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installation - Méthode 2: Utilisation de Docker
|
||||||
|
|
||||||
|
1. ChatGPT uniquement (recommandé pour la plupart des gens)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git # Télécharger le projet
|
||||||
|
cd gpt_academic # Accéder au chemin
|
||||||
|
nano config.py # Editez config.py avec n'importe quel éditeur de texte en configurant "Proxy", "API_KEY" et "WEB_PORT" (p. ex. 50923)
|
||||||
|
docker build -t gpt-academic . # Installer
|
||||||
|
|
||||||
|
# (Dernière étape - choix1) Dans un environnement Linux, l'utilisation de `--net=host` est plus facile et rapide
|
||||||
|
docker run --rm -it --net=host gpt-academic
|
||||||
|
# (Dernière étape - choix 2) Dans un environnement macOS/Windows, seule l'option -p permet d'exposer le port du récipient (p.ex. 50923) au port de l'hôte.
|
||||||
|
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. ChatGPT + ChatGLM + MOSS (il faut connaître Docker)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
# Modifiez docker-compose.yml, supprimez la solution 1 et la solution 3, conservez la solution 2. Modifiez la configuration de la solution 2 dans docker-compose.yml en suivant les commentaires.
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
3. ChatGPT + LLAMA + PanGu + RWKV (il faut connaître Docker)
|
||||||
|
``` sh
|
||||||
|
# Modifiez docker-compose.yml, supprimez la solution 1 et la solution 2, conservez la solution 3. Modifiez la configuration de la solution 3 dans docker-compose.yml en suivant les commentaires.
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Installation - Méthode 3: Autres méthodes de déploiement
|
||||||
|
|
||||||
|
1. Comment utiliser une URL de proxy inversé / Microsoft Azure Cloud API
|
||||||
|
Configurez simplement API_URL_REDIRECT selon les instructions de config.py.
|
||||||
|
|
||||||
|
2. Déploiement distant sur un serveur cloud (connaissance et expérience des serveurs cloud requises)
|
||||||
|
Veuillez consulter le [Wiki de déploiement-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97).
|
||||||
|
|
||||||
|
3. Utilisation de WSL2 (sous-système Windows pour Linux)
|
||||||
|
Veuillez consulter le [Wiki de déploiement-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2).
|
||||||
|
|
||||||
|
4. Comment exécuter sous un sous-répertoire (tel que `http://localhost/subpath`)
|
||||||
|
Veuillez consulter les [instructions d'exécution de FastAPI](docs/WithFastapi.md).
|
||||||
|
|
||||||
|
5. Utilisation de docker-compose
|
||||||
|
Veuillez lire docker-compose.yml, puis suivre les instructions fournies.
|
||||||
|
|
||||||
|
# Utilisation avancée
|
||||||
|
## Personnalisation de nouveaux boutons pratiques / Plugins de fonctions personnalisées
|
||||||
|
|
||||||
|
1. Personnalisation de nouveaux boutons pratiques (raccourcis académiques)
|
||||||
|
Ouvrez core_functional.py avec n'importe quel éditeur de texte, ajoutez une entrée comme suit, puis redémarrez le programme. (Si le bouton a été ajouté avec succès et est visible, le préfixe et le suffixe prennent en charge les modifications à chaud et ne nécessitent pas le redémarrage du programme pour prendre effet.)
|
||||||
|
Par exemple
|
||||||
|
```
|
||||||
|
"Super coller sens": {
|
||||||
|
# Préfixe, sera ajouté avant votre entrée. Par exemple, pour décrire votre demande, telle que traduire, expliquer du code, faire la mise en forme, etc.
|
||||||
|
"Prefix": "Veuillez traduire le contenu suivant en chinois, puis expliquer chaque terme proprement nommé qui y apparaît avec un tableau markdown:\n\n",
|
||||||
|
|
||||||
|
# Suffixe, sera ajouté après votre entrée. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu d'entrée de guillemets.
|
||||||
|
"Suffix": "",
|
||||||
|
},
|
||||||
|
```
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
2. Plugins de fonctions personnalisées
|
||||||
|
|
||||||
|
Écrivez des plugins de fonctions puissants pour effectuer toutes les tâches que vous souhaitez ou que vous ne pouvez pas imaginer.
|
||||||
|
Les plugins de ce projet ont une difficulté de programmation et de débogage très faible. Si vous avez des connaissances de base en Python, vous pouvez simuler la fonctionnalité de votre propre plugin en suivant le modèle que nous avons fourni.
|
||||||
|
Veuillez consulter le [Guide du plugin de fonction](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) pour plus de détails.
|
||||||
|
|
||||||
|
---
|
||||||
|
# Latest Update
|
||||||
|
|
||||||
|
## Nouvelles fonctionnalités en cours de déploiement.
|
||||||
|
|
||||||
|
1. Fonction de sauvegarde de la conversation.
|
||||||
|
Appelez simplement "Enregistrer la conversation actuelle" dans la zone de plugin de fonction pour enregistrer la conversation actuelle en tant que fichier html lisible et récupérable. De plus, dans la zone de plugin de fonction (menu déroulant), appelez "Charger une archive de l'historique de la conversation" pour restaurer la conversation précédente. Astuce : cliquer directement sur "Charger une archive de l'historique de la conversation" sans spécifier de fichier permet de consulter le cache d'archive html précédent. Cliquez sur "Supprimer tous les enregistrements locaux de l'historique de la conversation" pour supprimer le cache d'archive html.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
2. Générer un rapport. La plupart des plugins génèrent un rapport de travail après l'exécution.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
3. Conception de fonctionnalités modulaires avec une interface simple mais capable d'une fonctionnalité puissante.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
4. C'est un projet open source qui peut "se traduire de lui-même".
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
5. Traduire d'autres projets open source n'est pas un problème.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
6. Fonction de décoration de live2d (désactivée par défaut, nécessite une modification de config.py).
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
7. Prise en charge du modèle de langue MOSS.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
8. Génération d'images OpenAI.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
9. Analyse et synthèse vocales OpenAI.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
10. Correction de la totalité des erreurs de Latex.
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
## Versions :
|
||||||
|
- version 3.5 (À faire) : appel de toutes les fonctions de plugin de ce projet en langage naturel (priorité élevée)
|
||||||
|
- version 3.4 (À faire) : amélioration du support multi-thread de chatglm en local
|
||||||
|
- version 3.3 : Fonctionnalité intégrée d'informations d'internet
|
||||||
|
- version 3.2 : La fonction du plugin de fonction prend désormais en charge des interfaces de paramètres plus nombreuses (fonction de sauvegarde, décodage de n'importe quel langage de code + interrogation simultanée de n'importe quelle combinaison de LLM)
|
||||||
|
- version 3.1 : Prise en charge de l'interrogation simultanée de plusieurs modèles GPT ! Support api2d, équilibrage de charge multi-clé api.
|
||||||
|
- version 3.0 : Prise en charge de chatglm et autres LLM de petite taille.
|
||||||
|
- version 2.6 : Refonte de la structure des plugins, amélioration de l'interactivité, ajout de plus de plugins.
|
||||||
|
- version 2.5 : Auto-mise à jour, résolution des problèmes de texte trop long et de dépassement de jetons lors de la compilation du projet global.
|
||||||
|
- version 2.4 : (1) Nouvelle fonction de traduction de texte intégral PDF ; (2) Nouvelle fonction de permutation de position de la zone d'entrée ; (3) Nouvelle option de mise en page verticale ; (4) Amélioration des fonctions multi-thread de plug-in.
|
||||||
|
- version 2.3 : Amélioration de l'interactivité multithread.
|
||||||
|
- version 2.2 : Les plugins de fonctions peuvent désormais être rechargés à chaud.
|
||||||
|
- version 2.1 : Disposition pliable
|
||||||
|
- version 2.0 : Introduction de plugins de fonctions modulaires
|
||||||
|
- version 1.0 : Fonctionnalités de base
|
||||||
|
|
||||||
|
gpt_academic développeur QQ groupe-2:610599535
|
||||||
|
|
||||||
|
- Problèmes connus
|
||||||
|
- Certains plugins de traduction de navigateur perturbent le fonctionnement de l'interface frontend de ce logiciel
|
||||||
|
- Des versions gradio trop hautes ou trop basses provoquent de nombreuses anomalies
|
||||||
|
|
||||||
|
## Référence et apprentissage
|
||||||
|
|
||||||
|
```
|
||||||
|
De nombreux autres excellents projets ont été référencés dans le code, notamment :
|
||||||
|
|
||||||
|
# Projet 1 : ChatGLM-6B de Tsinghua :
|
||||||
|
https://github.com/THUDM/ChatGLM-6B
|
||||||
|
|
||||||
|
# Projet 2 : JittorLLMs de Tsinghua :
|
||||||
|
https://github.com/Jittor/JittorLLMs
|
||||||
|
|
||||||
|
# Projet 3 : Edge-GPT :
|
||||||
|
https://github.com/acheong08/EdgeGPT
|
||||||
|
|
||||||
|
# Projet 4 : ChuanhuChatGPT :
|
||||||
|
https://github.com/GaiZhenbiao/ChuanhuChatGPT
|
||||||
|
|
||||||
|
# Projet 5 : ChatPaper :
|
||||||
|
https://github.com/kaixindelele/ChatPaper
|
||||||
|
|
||||||
|
# Plus :
|
||||||
|
https://github.com/gradio-app/gradio
|
||||||
|
https://github.com/fghrsh/live2d_demo
|
||||||
|
```
|
||||||
docs/README_JP.md (regular file, 329 lines added)
@@ -0,0 +1,329 @@
|
|||||||
|
> **Note**
|
||||||
|
>
|
||||||
|
> このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。
|
||||||
|
>
|
||||||
|
> When installing dependencies, please strictly choose the versions specified in `requirements.txt`.
|
||||||
|
>
|
||||||
|
> `pip install -r requirements.txt`
|
||||||
|
>
|
||||||
|
|
||||||
|
# <img src="logo.png" width="40" > GPT 学术优化 (GPT Academic)
|
||||||
|
|
||||||
|
**もしこのプロジェクトが好きなら、星をつけてください。もしあなたがより良いアカデミックショートカットまたは機能プラグインを思いついた場合、Issueをオープンするか pull request を送信してください。私たちはこのプロジェクト自体によって翻訳された[英語 |](README_EN.md)[日本語 |](README_JP.md)[한국어 |](https://github.com/mldljyh/ko_gpt_academic)[Русский |](README_RS.md)[Français](README_FR.md)のREADMEも用意しています。
|
||||||
|
GPTを使った任意の言語にこのプロジェクトを翻訳するには、[`multi_language.py`](multi_language.py)を読んで実行してください。 (experimental)。
|
||||||
|
|
||||||
|
> **注意**
|
||||||
|
>
|
||||||
|
> 1. **赤色**で表示された関数プラグイン(ボタン)のみ、ファイルの読み取りをサポートしています。一部のプラグインは、プラグインエリアの**ドロップダウンメニュー**内にあります。また、私たちはどんな新しいプラグインのPRでも、**最優先**で歓迎し、処理します!
|
||||||
|
>
|
||||||
|
> 2. このプロジェクトの各ファイルの機能は、自己解析の詳細説明書である[`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)で説明されています。バージョンが進化するにつれて、関連する関数プラグインをいつでもクリックし、GPTを呼び出してプロジェクトの自己解析レポートを再生成することができます。よくある問題は[`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)にまとめられています。[インストール方法](#installation)。
|
||||||
|
|
||||||
|
> 3. このプロジェクトは、chatglmやRWKV、パンクなど、国内の大規模自然言語モデルを利用することをサポートし、試みることを奨励します。複数のAPIキーを共存することができ、設定ファイルに`API_KEY="openai-key1,openai-key2,api2d-key3"`のように記入することができます。`API_KEY`を一時的に変更する場合は、入力エリアに一時的な`API_KEY`を入力してEnterキーを押せば、それが有効になります。
|
||||||
|
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
機能 | 説明
|
||||||
|
--- | ---
|
||||||
|
一键校正 | 一键で校正可能、論文の文法エラーを検索することができる
|
||||||
|
一键中英翻訳 | 一键で中英翻訳可能
|
||||||
|
一键コード解説 | コードを表示し、解説し、生成し、コードに注釈をつけることができる
|
||||||
|
[自分でカスタマイズ可能なショートカットキー](https://www.bilibili.com/video/BV14s4y1E7jN) | 自分でカスタマイズ可能なショートカットキーをサポートする
|
||||||
|
モジュール化された設計 | カスタマイズ可能な[強力な関数プラグイン](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions)をサポートし、プラグインは[ホットアップデート](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)に対応している
|
||||||
|
[自己プログラム解析](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン] [一键読解](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)このプロジェクトのソースコード
|
||||||
|
プログラム解析 | [関数プラグイン] 一鍵で他のPython/C/C++/Java/Lua/...プロジェクトを分析できる
|
||||||
|
論文の読み、[翻訳](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] LaTex/ PDF論文の全文を一鍵で読み解き、要約を生成することができる
|
||||||
|
LaTex全文[翻訳](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[校正](https://www.bilibili.com/video/BV1FT411H7c5/) | [関数プラグイン] LaTex論文の翻訳または校正を一鍵で行うことができる
|
||||||
|
一括で注釈を生成 | [関数プラグイン] 一鍵で関数に注釈をつけることができる
|
||||||
|
Markdown[中英翻訳](https://www.bilibili.com/video/BV1yo4y157jV/) | [関数プラグイン] 上記の5種類の言語の[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)を見たことがありますか?
|
||||||
|
チャット分析レポート生成 | [関数プラグイン] 実行後、自動的に概要報告書を生成する
|
||||||
|
[PDF論文全文翻訳機能](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] PDF論文からタイトルと要約を抽出し、全文を翻訳する(マルチスレッド)
|
||||||
|
[Arxivアシスタント](https://www.bilibili.com/video/BV1LM4y1279X) | [関数プラグイン] arxiv記事のURLを入力するだけで、要約を一鍵翻訳し、PDFをダウンロードできる
|
||||||
|
[Google Scholar 総合アシスタント](https://www.bilibili.com/video/BV19L411U7ia) | [関数プラグイン] 任意のGoogle Scholar検索ページURLを指定すると、gptが[related works](https://www.bilibili.com/video/BV1GP411U7Az/)を作成する
|
||||||
|
インターネット情報収集+GPT | [関数プラグイン] まずGPTに[インターネットから情報を収集](https://www.bilibili.com/video/BV1om4y127ck)してから質問に回答させ、情報が常に最新であるようにする
|
||||||
|
数式/画像/表表示 | 数式の[tex形式とレンダリング形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png)を同時に表示し、数式、コードハイライトをサポートしている
|
||||||
|
マルチスレッド関数プラグインがサポートされている | chatgptをマルチスレッドで呼び出し、[大量のテキスト](https://www.bilibili.com/video/BV1FT411H7c5/)またはプログラムを一鍵で処理できる
|
||||||
|
ダークグラジオ[テーマの起動](https://github.com/binary-husky/gpt_academic/issues/173) | ブラウザのURLの後ろに```/?__theme=dark```を追加すると、ダークテーマを切り替えることができます。
|
||||||
|
[多数のLLMモデル](https://www.bilibili.com/video/BV1wT411p7yf)がサポートされ、[API2D](https://api2d.com/)がサポートされている | 同時にGPT3.5、GPT4、[清華ChatGLM](https://github.com/THUDM/ChatGLM-6B)、[復旦MOSS](https://github.com/OpenLMLab/MOSS)に対応
|
||||||
|
より多くのLLMモデルが接続され、[huggingfaceデプロイ](https://huggingface.co/spaces/qingxu98/gpt-academic)がサポートされている | Newbingインターフェイス(Newbing)、清華大学の[Jittorllm](https://github.com/Jittor/JittorLLMs)のサポート[LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV)と[盘古α](https://openi.org.cn/pangu/)
|
||||||
|
さらに多くの新機能(画像生成など)を紹介する... | この文書の最後に示す...
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- 新しいインターフェース(`config.py`のLAYOUTオプションを変更することで、「左右配置」と「上下配置」を切り替えることができます)
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||||
|
</div>

- All buttons are dynamically generated by reading functional.py; custom functions can be added freely, freeing up your clipboard.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Polishing/Correction
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- If the output contains formulas, they are displayed in both TeX and rendering forms, making it easy to copy and read.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
- Don't feel like looking at the project code? Just ask chatgpt directly.
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
- Mixed calls of multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# Installation
|
||||||
|
|
||||||
|
## Installation-Method 1: Directly run (Windows, Linux or MacOS)
|
||||||
|
|
||||||
|
1. Download the project.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git
|
||||||
|
cd gpt_academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Configure the API_KEY.
|
||||||
|
|
||||||
|
Configure the API KEY and other settings in `config.py` and [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
|
||||||
|
|
||||||
|
(P.S. When the program is running, it will first check if there is a private configuration file named `config_private.py`, and use the configuration in it to override the same name configuration in `config.py`. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py`, and transfer (copy) the configuration in `config.py` to `config_private.py`. `config_private.py` is not controlled by git and can make your privacy information more secure. P.S. The project also supports configuring most options through `environment variables`, and the writing format of environment variables refers to the `docker-compose` file. Reading priority: `environment variables` > `config_private.py` > `config.py`)
|
||||||
|
|
||||||
|
3. Install dependencies.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# (Choose I: If familiar with Python)(Python version 3.9 or above, the newer the better) Note: Use the official pip source or Ali pip source. Temporary switching source method: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||||
|
python -m pip install -r requirements.txt
|
||||||
|
|
||||||
|
# (Choose II: If not familiar with Python) Use anaconda, the steps are the same (https://www.bilibili.com/video/BV1rc411W7Dr):
|
||||||
|
conda create -n gptac_venv python=3.11 # Create anaconda environment.
|
||||||
|
conda activate gptac_venv # Activate the anaconda environment.
|
||||||
|
python -m pip install -r requirements.txt # This step is the same as the pip installation step.
|
||||||
|
```
|
||||||
|
|
||||||
|
<details><summary>If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, click to expand.</summary>
|
||||||
|
<p>
|
||||||
|
|
||||||
|
[Optional Steps] If you need to support Tsinghua ChatGLM/Fudan MOSS as a backend, you need to install more dependencies (prerequisites: familiar with Python + have used PyTorch + the computer configuration is strong enough):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Optional step I: support Tsinghua ChatGLM. Tsinghua ChatGLM remarks: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The version installed above is torch+cpu version, using cuda requires uninstalling torch and reinstalling torch+cuda; 2: If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, and change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
|
||||||
|
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||||
|
|
||||||
|
# Optional Step II: Support Fudan MOSS.
|
||||||
|
python -m pip install -r request_llm/requirements_moss.txt
|
||||||
|
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, it must be in the project root.
|
||||||
|
|
||||||
|
# 【Optional Step III】Ensure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports the docker solution):
|
||||||
|
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
|
||||||
|
```
|
||||||
|
|
||||||
|
</p>
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
4. Run.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
python main.py
|
||||||
|
```

5. Test the function plugins
|
||||||
|
```
|
||||||
|
- Test function plugin template function (requires gpt to answer what happened today in history), you can use this function as a template to implement more complex functions
|
||||||
|
Click "[Function Plugin Template Demo] Today in History"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installation-Methods 2: Using Docker
|
||||||
|
|
||||||
|
1. Only ChatGPT (recommended for most people)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
git clone https://github.com/binary-husky/gpt_academic.git # Download project
|
||||||
|
cd gpt_academic # Enter path
|
||||||
|
nano config.py # Edit config.py with any text editor; configure "Proxy", "API_KEY", "WEB_PORT" (e.g., 50923) and more
|
||||||
|
docker build -t gpt-academic . # installation
|
||||||
|
|
||||||
|
#(Last step-Option 1) In a Linux environment, `--net=host` is more convenient and quick
|
||||||
|
docker run --rm -it --net=host gpt-academic
|
||||||
|
#(Last step-Option 2) In a macOS/windows environment, the -p option must be used to expose the container port (e.g., 50923) to the port on the host.
|
||||||
|
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
|
||||||
|
```
|
||||||
|
|
||||||
|
2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
|
||||||
|
|
||||||
|
``` sh
|
||||||
|
# Modify docker-compose.yml, delete plans 1 and 3, and retain plan 2. Modify the configuration of plan 2 in docker-compose.yml, and reference the comments for instructions.
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
3. ChatGPT + LLAMA + Pangu + RWKV (requires familiarity with Docker)
|
||||||
|
``` sh
|
||||||
|
# Modify docker-compose.yml, delete plans 1 and 2, and retain plan 3. Modify the configuration of plan 3 in docker-compose.yml, and reference the comments for instructions.
|
||||||
|
docker-compose up
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Installation-Method 3: Other Deployment Methods
|
||||||
|
|
||||||
|
1. How to use proxy URL/Microsoft Azure API
|
||||||
|
Configure API_URL_REDIRECT according to the instructions in `config.py`.
|
||||||
|
|
||||||
|
2. Remote Cloud Server Deployment (requires cloud server knowledge and experience)
|
||||||
|
Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||||
|
|
||||||
|
3. Using WSL2 (Windows Subsystem for Linux Subsystem)
|
||||||
|
Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||||
|
|
||||||
|
4. How to run on a secondary URL (such as `http://localhost/subpath`)
|
||||||
|
Please visit [FastAPI Running Instructions](docs/WithFastapi.md)
|
||||||
|
|
||||||
|
5. Run with docker-compose
|
||||||
|
Please read docker-compose.yml and follow the instructions provided therein.
|
||||||
|
---
|
||||||
|
# Advanced Usage
|
||||||
|
## Customize new convenience buttons/custom function plugins
|
||||||
|
|
||||||
|
1. Custom new convenience buttons (academic shortcut keys)
|
||||||
|
Open `core_functional.py` with any text editor, add the item as follows, and restart the program. (If the button has been added successfully and is visible, the prefix and suffix support hot modification without restarting the program.)
|
||||||
|
example:
|
||||||
|
```
|
||||||
|
"Super English to Chinese Translation": {
|
||||||
|
# Prefix, which will be added before your input. For example, used to describe your request, such as translation, code interpretation, polish, etc.
|
||||||
|
"Prefix": "Please translate the following content into Chinese, and explain the proper nouns in the text in a markdown table one by one:\n\n",
|
||||||
|
|
||||||
|
# Suffix, which will be added after your input. For example, in combination with the prefix, you can surround your input content with quotation marks.
|
||||||
|
"Suffix": "",
|
||||||
|
},
|
||||||
|
```
|
||||||
|
<div align="center">
|
||||||
|
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||||
|
</div>
|
||||||
|
|
||||||
|
2. Custom function plugins
|
||||||
|
|
||||||
|
Write powerful function plugins to perform any task you can and cannot think of.
|
||||||
|
The difficulty of writing and debugging plugins in this project is low, and as long as you have a certain amount of python basic knowledge, you can follow the template provided by us to achieve your own plugin functions.
|
||||||
|
For details, please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
|
||||||
|
|
||||||
|
---
# Latest Update
## New feature dynamics

1. Conversation saving. Call "Save current conversation" in the function plugin area to save the current conversation as a readable and restorable HTML file. Call "Load conversation history archive" in the function plugin area (dropdown menu) to restore a previous session. Tip: clicking "Load conversation history archive" without specifying a file lets you browse the cache of historical HTML archives, and clicking "Delete all local conversation history" deletes all cached HTML archives.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/235222390-24a9acc0-680f-49f5-bc81-2f3161f1e049.png" width="500">
</div>

2. Report generation. Most plugins generate a work report after they finish running.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300">
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300">
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300">
</div>

3. Modular function design: simple interfaces support powerful functionality.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400">
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400">
</div>

4. An open-source project that can translate itself.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="500">
</div>

5. Reading other open-source projects becomes easy.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="500">
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="500">
</div>

6. A small decorative feature based on [Live2D](https://github.com/fghrsh/live2d_demo) (disabled by default; enabling it requires modifying `config.py`).
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500">
</div>

7. Added support for the MOSS large language model.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500">
</div>

8. OpenAI image generation.
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500">
</div>

9. OpenAI audio analysis and summarization.
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500">
</div>

10. Full-text LaTeX proofreading.
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" width="500">
</div>

## Versions:
- version 3.5 (in progress): call all of the project's function plugins with natural language (high priority).
- version 3.4 (in progress): improve multi-threading support for local chatglm models.
- version 3.3: added an integrated Internet information feature.
- version 3.2: function plugins support more parameter interfaces (conversation saving, interpretation of code in any language, plus querying any combination of LLMs at the same time).
- version 3.1: support for querying multiple GPT models simultaneously! Support for api2d, with load balancing across multiple API keys.
- version 3.0: support for chatglm and other small LLMs.
- version 2.6: restructured the plugin architecture, improved interactivity, and added more plugins.
- version 2.5: self-updating; fixed the long-text and token-overflow problem.
- version 2.4: (1) added full-text PDF translation; (2) added input area position switching; (3) added vertical layout option; (4) optimized multi-threaded function plugins.
- version 2.3: improved multi-threading performance.
- version 2.2: function plugins support hot reloading.
- version 2.1: collapsible layout.
- version 2.0: introduced modular function plugins.
- version 1.0: basic functionality

gpt_academic developer QQ group-2: 610599535

- Known issues
    - Some browser translation plugins interfere with the front-end of this software
    - A gradio version that is too high or too low causes many problems

## References and Learning

```
The code includes many designs referenced from other excellent projects:

# Project 1: Tsinghua ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```
278 docs/README_RS.md (normal file)
@@ -0,0 +1,278 @@
> **Note**
>
> This README is automatically generated by this project's markdown translation module and may not be 100% accurate.
>

# <img src="logo.png" width="40" > GPT Academic Optimization (GPT Academic)

**If you like this project, please give it a star. If you have come up with more useful academic shortcuts or function plugins, feel free to open an issue or pull request.**
To translate this project into an arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).

> **Note**
>
> 1. Please note that only function plugins (buttons) marked in **red** support reading files; some plugins are located in the **dropdown menu** of the plugin area. In addition, we welcome and handle pull requests for any new plugins with the highest priority!
>
> 2. The functionality of each file in this project is described in the self-analysis report [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions iterate, you can regenerate the project's self-analysis report at any time by clicking the corresponding function plugin and calling GPT. Common questions are collected in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation method](#installation).
>
> 3. This project is compatible with, and encourages the use of, Chinese language models such as chatglm, RWKV, Pangu, etc. Multiple api-keys can coexist and can be specified in the config file, e.g. `API_KEY="openai-key1,openai-key2,api2d-key3"`. To change the `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter for it to take effect.

> **Note**
>
> When installing dependencies, strictly select the versions **specified in requirements.txt**.
>
> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`

Feature | Description
--- | ---
One-click polishing | Supports one-click polishing and one-click search for grammar mistakes in academic papers
One-click Chinese-English translation | One-click Chinese-English translation
One-click code explanation | Display, explain, generate, and comment code
[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reloading](https://github.com/binary-husky/gpt_academic/wiki/Function-Plug-in-Guide)
[Self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click review](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academicProject-Self-analysis-Report) of this project's own source code
[Project analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of the file tree of other Python/C/C++/Java/Lua/... projects
Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click reading of the full text of an academic paper and generation of a summary
Full [LaTeX](https://www.bilibili.com/video/BV1nk4y1Y7Js/) translation and polishing | [Function plugin] One-click translation or polishing of a LaTeX paper
Automatic comment generation | [Function plugin] One-click automatic generation of function comments
Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in these 5 languages?
Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
[PDF paper](https://www.bilibili.com/video/BV1KT411x7Wn) full-text translation | [Function plugin] Extracts the title and abstract of a [PDF paper](https://www.bilibili.com/video/BV1KT411x7Wn) and translates the whole document (multi-threaded)
[Arxiv Helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter the URL of an arxiv paper to translate its abstract and download the PDF with one click
[Google Scholar Integration Helper](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let GPT help you [write a survey](https://www.bilibili.com/video/BV1GP411U7Az/)
Internet information aggregation + GPT | [Function plugin] One-click [ask GPT to fetch information from the Internet](https://www.bilibili.com/video/BV1om4y127ck) before answering, so the information never becomes outdated
Formula/image/table display | Displays formulas in both [TeX form and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png); supports formula and code highlighting
Multi-threaded function plugin support | Supports multi-threaded calls to chatgpt; one-click processing of [large amounts of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
Dark gradio theme | Add ```/?__theme=dark``` after the URL in the browser to switch to the dark theme
[Multi-LLM support](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) | Served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS)
More LLM model connections, [huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) deployment support | Newbing interface (new Bing), support for [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV), and [Pangu α](https://openi.org.cn/pangu/)
More new features (image generation, etc.) | See the end of this file…

- All buttons are dynamically generated by reading functional.py, and custom functions can be freely added to liberate the clipboard
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Revision/Correction
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- If the output contains formulas, they are displayed in both TeX and rendered form for easy copying and reading
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- Don't feel like reading the project code? Just show the entire project to chatgpt
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Mixing multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

---
# Installation
## Installation - Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project
```sh
git clone https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. Configure API_KEY

In `config.py`, configure the API KEY and other settings; see also the [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).

(P.S. When the program runs, it first checks whether a private configuration file named `config_private.py` exists and uses its values to override the same-named options in `config.py`. If you understand this reading logic, we strongly recommend creating a `config_private.py` next to `config.py` and copying the options you change from `config.py` into it. `config_private.py` is not tracked by git, which keeps your private information safer. P.S. The project also supports configuring most options through `environment variables`; the environment variable format follows the `docker-compose` file. Reading priority: `environment variables` > `config_private.py` > `config.py`. A minimal sketch is shown below.)
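As a concrete illustration of that override mechanism, here is a minimal `config_private.py` sketch. The option names below (API_KEY, USE_PROXY, proxies, WEB_PORT) are assumed to match the ones shipped in `config.py`; always copy the exact names and default values from your local `config.py`.

```python
# config_private.py -- hedged sketch; verify every option name against config.py.
# This file is not tracked by git, so secrets stay out of version control.
API_KEY = "sk-your-openai-key1,api2d-your-key2"   # multiple keys may coexist, comma-separated
USE_PROXY = True                                   # set to False if you have direct network access
proxies = {                                        # only needed when USE_PROXY is True
    "http":  "socks5h://localhost:11284",
    "https": "socks5h://localhost:11284",
}
WEB_PORT = 50923                                   # port for the web UI (matches the Docker example below)
```

The same options can instead be supplied as environment variables, which take the highest priority, as noted above.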

3. Install dependencies
```sh
# (Option I: If familiar with Python; Python 3.9 or above, the newer the better.) Note: use the official pip source or the aliyun pip source. To switch sources temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (Option II: If unfamiliar with Python) Use Anaconda; the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11    # create an Anaconda environment
conda activate gptac_venv                 # activate the Anaconda environment
python -m pip install -r requirements.txt # same as the pip installation step
```

<details><summary>If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, click here to expand</summary>
<p>

[Optional step] If you need to support Tsinghua ChatGLM / Fudan MOSS as the backend, you need to install additional dependencies (prerequisites: familiar with Python, have used PyTorch, and a sufficiently powerful machine):
```sh
# [Optional step I] Support Tsinghua ChatGLM. Note: if you encounter the "Call ChatGLM fail, cannot load ChatGLM parameters normally" error, refer to the following: 1) the default installation above is the torch+cpu version; to use CUDA, uninstall torch and reinstall torch+cuda; 2) if the model cannot be loaded due to insufficient local hardware, change the model precision in request_llm/bridge_chatglm.py, i.e. replace AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt

# [Optional step II] Support Fudan MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss  # note: this command must be executed from the project root path

# [Optional step III] Make sure AVAIL_LLM_MODELS in config.py contains the expected models. Currently supported models (the jittorllms series currently only supports the docker solution):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
```

</p>
</details>

4. Run
```sh
python main.py
```

5. Testing Function Plugin
```
- Test the function plugin template (it asks GPT what happened in history on this day); you can use this function as a template to implement more complex functions
    Click "[Function plugin Template Demo] On this day in history"
```

## Installation - Method 2: Using Docker

1. ChatGPT only (recommended for most people)

``` sh
git clone https://github.com/binary-husky/gpt_academic.git  # download the project
cd gpt_academic                                             # enter the path
nano config.py                                              # edit config.py with any text editor to configure "Proxy", "API_KEY" and "WEB_PORT" (e.g. 50923)
docker build -t gpt-academic .                              # install

# (Last step - Option 1) In a Linux environment, using `--net=host` is more convenient and faster
docker run --rm -it --net=host gpt-academic
# (Last step - Option 2) In a macOS/Windows environment, only the -p option can be used to expose the container's port (e.g. 50923) to a port on the host
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```

2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)

``` sh
# Edit docker-compose.yml: delete schemes 1 and 3 and keep scheme 2. Modify the configuration of scheme 2 in docker-compose.yml following the comments in the file
docker-compose up
```

3. ChatGPT + LLAMA + PanGu + RWKV (requires familiarity with Docker)
``` sh
# Edit docker-compose.yml: delete schemes 1 and 2 and keep scheme 3. Modify the configuration of scheme 3 in docker-compose.yml following the comments in the file
docker-compose up
```

## Installation Method 3: Other Deployment Methods

1. How to use a reverse proxy URL / Microsoft Azure API
Configure API_URL_REDIRECT according to the instructions in `config.py`. A hedged example follows below.
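As an illustration only (verify the exact format against the comments in your `config.py`; the target URL below is a placeholder), the redirect is a dictionary that maps the official OpenAI endpoint to your proxy or Azure-style endpoint:

```python
# In config.py or config_private.py -- hedged sketch, not the authoritative format.
API_URL_REDIRECT = {
    "https://api.openai.com/v1/chat/completions": "https://your-proxy.example.com/v1/chat/completions",
}
```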

2. Remote cloud server deployment (requires knowledge and experience of cloud servers)
Please visit [Deployment Wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

3. Using WSL2 (Windows Subsystem for Linux)
Please visit [Deployment Wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

4. How to run on a secondary URL (such as `http://localhost/subpath`)
Please visit [FastAPI Operation Instructions](docs/WithFastapi.md)

5. Run with docker-compose
Please read docker-compose.yml and follow the instructions provided therein.

---
# Advanced Usage
## Customize new convenient buttons / custom function plugins

1. Customize new convenient buttons (academic shortcuts)

Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has already been added successfully and is visible, both the prefix and the suffix can be hot-modified without restarting the program.)

For example:
```
"Super English to Chinese": {
    # Prefix, which will be added before your input. For example, used to describe your request, such as translation, code explanation, polishing, etc.
    "Prefix": "Please translate the following content into Chinese, and then explain each proper noun that appears in the text with a markdown table:\n\n",

    # Suffix, which will be added after your input. For example, combined with the prefix, it can enclose your input in quotation marks.
    "Suffix": "",
},
```
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

2. Custom function plugins

Write powerful function plugins to perform any task you can and can't imagine.
Writing and debugging plugins in this project is easy: with basic Python knowledge you can implement your own plugin features by following the template we provide.
Please refer to the [Function Plugin Guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) for details.

---
# Latest Update
## New feature dynamics

1. Conversation saving. Call "Save current conversation" in the function plugin area to save the current conversation as a readable and restorable HTML file. Call "Load conversation history archive" in the function plugin menu to restore a previous session. Tip: clicking "Load conversation history archive" without specifying a file lets you browse the cache of historical HTML archives; clicking "Delete all local conversation history" deletes all cached HTML archives.

2. Report generation. Most plugins generate a work report after they finish running.

3. Modular function design: simple interfaces support powerful functionality.

4. This is an open-source project that can "translate itself".

5. Translating other open-source projects is no problem either.

6. A small decorative feature based on [live2d](https://github.com/fghrsh/live2d_demo) (disabled by default; enabling it requires modifying `config.py`).

7. Support for the MOSS large language model.

8. OpenAI image generation.

9. OpenAI audio analysis and summarization.

10. Full-cycle LaTeX proofreading.

## Versions:
- Version 3.5 (Todo): call the project's function plugins using natural language (high priority)
- Version 3.4 (Todo): improve multi-threading support for local large chat models.
- Version 3.3: added an integrated Internet information feature.
- Version 3.2: function plugins support more parameter interfaces (conversation saving, interpretation of code in any language, plus querying any combination of LLMs at the same time).
- Version 3.1: support for querying multiple GPT models simultaneously! Support for api2d, with load balancing across multiple API keys.
- Version 3.0: support for chatglm and other small LLMs.
- Version 2.6: restructured the plugin architecture, improved interactivity, and added more plugins.
- Version 2.5: self-updating; fixed the long-text and token-overflow problem when processing large projects.
- Version 2.4: (1) added full-text PDF translation; (2) added input area position switching; (3) added vertical layout option; (4) optimized multi-threaded plugins.
- Version 2.3: improved multi-threaded interactivity.
- Version 2.2: function plugins support hot reloading.
- Version 2.1: collapsible layout.
- Version 2.0: modular function plugins.
- Version 1.0: basic functionality.

gpt_academic developer QQ group-2: 610599535

- Known issues
    - Some browser translation plugins interfere with the front-end of this software
    - A gradio version that is too high or too low causes many problems

## References and learning materials

```
We used many concepts and code from other excellent projects, including:

# Project 1: Tsinghua ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua JittorLLMs:
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT:
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT:
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper:
https://github.com/kaixindelele/ChatPaper

# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo
```
||||||
@@ -28,7 +28,7 @@
|
|||||||
| crazy_functions\批量Markdown翻译.py | 将指定目录下的Markdown文件进行中英文翻译 |
|
| crazy_functions\批量Markdown翻译.py | 将指定目录下的Markdown文件进行中英文翻译 |
|
||||||
| crazy_functions\批量总结PDF文档.py | 对PDF文件进行切割和摘要生成 |
|
| crazy_functions\批量总结PDF文档.py | 对PDF文件进行切割和摘要生成 |
|
||||||
| crazy_functions\批量总结PDF文档pdfminer.py | 对PDF文件进行文本内容的提取和摘要生成 |
|
| crazy_functions\批量总结PDF文档pdfminer.py | 对PDF文件进行文本内容的提取和摘要生成 |
|
||||||
| crazy_functions\PDF批量翻译.py | 将指定目录下的PDF文件进行中英文翻译 |
|
| crazy_functions\批量翻译PDF文档_多线程.py | 将指定目录下的PDF文件进行中英文翻译 |
|
||||||
| crazy_functions\理解PDF文档内容.py | 对PDF文件进行摘要生成和问题解答 |
|
| crazy_functions\理解PDF文档内容.py | 对PDF文件进行摘要生成和问题解答 |
|
||||||
| crazy_functions\生成函数注释.py | 自动生成Python函数的注释 |
|
| crazy_functions\生成函数注释.py | 自动生成Python函数的注释 |
|
||||||
| crazy_functions\联网的ChatGPT.py | 使用网络爬虫和ChatGPT模型进行聊天回答 |
|
| crazy_functions\联网的ChatGPT.py | 使用网络爬虫和ChatGPT模型进行聊天回答 |
|
||||||
@@ -187,9 +187,9 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和
|
|||||||
|
|
||||||
该程序文件是一个用于批量总结PDF文档的函数插件,使用了pdfminer插件和BeautifulSoup库来提取PDF文档的文本内容,对每个PDF文件分别进行处理并生成中英文摘要。同时,该程序文件还包括一些辅助工具函数和处理异常的装饰器。
|
该程序文件是一个用于批量总结PDF文档的函数插件,使用了pdfminer插件和BeautifulSoup库来提取PDF文档的文本内容,对每个PDF文件分别进行处理并生成中英文摘要。同时,该程序文件还包括一些辅助工具函数和处理异常的装饰器。
|
||||||
|
|
||||||
## [24/48] 请对下面的程序文件做一个概述: crazy_functions\PDF批量翻译.py
|
## [24/48] 请对下面的程序文件做一个概述: crazy_functions\批量翻译PDF文档_多线程.py
|
||||||
|
|
||||||
这个程序文件是一个Python脚本,文件名为“PDF批量翻译.py”。它主要使用了“toolbox”、“request_gpt_model_in_new_thread_with_ui_alive”、“request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency”、“colorful”等Python库和自定义的模块“crazy_utils”的一些函数。程序实现了一个批量翻译PDF文档的功能,可以自动解析PDF文件中的基础信息,递归地切割PDF文件,翻译和处理PDF论文中的所有内容,并生成相应的翻译结果文件(包括md文件和html文件)。功能比较复杂,其中需要调用多个函数和依赖库,涉及到多线程操作和UI更新。文件中有详细的注释和变量命名,代码比较清晰易读。
|
这个程序文件是一个Python脚本,文件名为“批量翻译PDF文档_多线程.py”。它主要使用了“toolbox”、“request_gpt_model_in_new_thread_with_ui_alive”、“request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency”、“colorful”等Python库和自定义的模块“crazy_utils”的一些函数。程序实现了一个批量翻译PDF文档的功能,可以自动解析PDF文件中的基础信息,递归地切割PDF文件,翻译和处理PDF论文中的所有内容,并生成相应的翻译结果文件(包括md文件和html文件)。功能比较复杂,其中需要调用多个函数和依赖库,涉及到多线程操作和UI更新。文件中有详细的注释和变量命名,代码比较清晰易读。
|
||||||
|
|
||||||
## [25/48] 请对下面的程序文件做一个概述: crazy_functions\理解PDF文档内容.py
|
## [25/48] 请对下面的程序文件做一个概述: crazy_functions\理解PDF文档内容.py
|
||||||
|
|
||||||
@@ -331,7 +331,7 @@ check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, c
|
|||||||
这些程序源文件提供了基础的文本和语言处理功能、工具函数和高级插件,使 Chatbot 能够处理各种复杂的学术文本问题,包括润色、翻译、搜索、下载、解析等。
|
这些程序源文件提供了基础的文本和语言处理功能、工具函数和高级插件,使 Chatbot 能够处理各种复杂的学术文本问题,包括润色、翻译、搜索、下载、解析等。
|
||||||
|
|
||||||
## 用一张Markdown表格简要描述以下文件的功能:
|
## 用一张Markdown表格简要描述以下文件的功能:
|
||||||
crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生成.py, crazy_functions\对话历史存档.py, crazy_functions\总结word文档.py, crazy_functions\总结音视频.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\PDF批量翻译.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\联网的ChatGPT.py, crazy_functions\解析JupyterNotebook.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py。根据以上分析,用一句话概括程序的整体功能。
|
crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生成.py, crazy_functions\对话历史存档.py, crazy_functions\总结word文档.py, crazy_functions\总结音视频.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\批量翻译PDF文档_多线程.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\联网的ChatGPT.py, crazy_functions\解析JupyterNotebook.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py。根据以上分析,用一句话概括程序的整体功能。
|
||||||
|
|
||||||
| 文件名 | 功能简述 |
|
| 文件名 | 功能简述 |
|
||||||
| --- | --- |
|
| --- | --- |
|
||||||
@@ -343,7 +343,7 @@ crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生
|
|||||||
| 批量Markdown翻译.py | 将指定目录下的Markdown文件进行中英文翻译 |
|
| 批量Markdown翻译.py | 将指定目录下的Markdown文件进行中英文翻译 |
|
||||||
| 批量总结PDF文档.py | 对PDF文件进行切割和摘要生成 |
|
| 批量总结PDF文档.py | 对PDF文件进行切割和摘要生成 |
|
||||||
| 批量总结PDF文档pdfminer.py | 对PDF文件进行文本内容的提取和摘要生成 |
|
| 批量总结PDF文档pdfminer.py | 对PDF文件进行文本内容的提取和摘要生成 |
|
||||||
| PDF批量翻译.py | 将指定目录下的PDF文件进行中英文翻译 |
|
| 批量翻译PDF文档_多线程.py | 将指定目录下的PDF文件进行中英文翻译 |
|
||||||
| 理解PDF文档内容.py | 对PDF文件进行摘要生成和问题解答 |
|
| 理解PDF文档内容.py | 对PDF文件进行摘要生成和问题解答 |
|
||||||
| 生成函数注释.py | 自动生成Python函数的注释 |
|
| 生成函数注释.py | 自动生成Python函数的注释 |
|
||||||
| 联网的ChatGPT.py | 使用网络爬虫和ChatGPT模型进行聊天回答 |
|
| 联网的ChatGPT.py | 使用网络爬虫和ChatGPT模型进行聊天回答 |
|
||||||
|
|||||||
@@ -44,7 +44,7 @@
|
|||||||
"批量总结PDF文档": "BatchSummarizePDFDocuments",
|
"批量总结PDF文档": "BatchSummarizePDFDocuments",
|
||||||
"批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsUsingPdfminer",
|
"批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsUsingPdfminer",
|
||||||
"批量翻译PDF文档": "BatchTranslatePDFDocuments",
|
"批量翻译PDF文档": "BatchTranslatePDFDocuments",
|
||||||
"PDF批量翻译": "BatchTranslatePDFDocuments_MultiThreaded",
|
"批量翻译PDF文档_多线程": "BatchTranslatePDFDocuments_MultiThreaded",
|
||||||
"谷歌检索小助手": "GoogleSearchAssistant",
|
"谷歌检索小助手": "GoogleSearchAssistant",
|
||||||
"理解PDF文档内容标准文件输入": "UnderstandPdfDocumentContentStandardFileInput",
|
"理解PDF文档内容标准文件输入": "UnderstandPdfDocumentContentStandardFileInput",
|
||||||
"理解PDF文档内容": "UnderstandPdfDocumentContent",
|
"理解PDF文档内容": "UnderstandPdfDocumentContent",
|
||||||
@@ -1392,7 +1392,7 @@
|
|||||||
"1. 临时解决方案": "1. Temporary Solution",
|
"1. 临时解决方案": "1. Temporary Solution",
|
||||||
"直接在输入区键入api_key": "Enter the api_key Directly in the Input Area",
|
"直接在输入区键入api_key": "Enter the api_key Directly in the Input Area",
|
||||||
"然后回车提交": "Submit after pressing Enter",
|
"然后回车提交": "Submit after pressing Enter",
|
||||||
"2. 长效解决方案": "2. Long-term solution",
|
"2. 长效解决方案": "Long-term solution",
|
||||||
"在config.py中配置": "Configure in config.py",
|
"在config.py中配置": "Configure in config.py",
|
||||||
"等待响应": "Waiting for response",
|
"等待响应": "Waiting for response",
|
||||||
"api-key不满足要求": "API key does not meet requirements",
|
"api-key不满足要求": "API key does not meet requirements",
|
||||||
@@ -2184,8 +2184,7 @@
|
|||||||
"接驳VoidTerminal": "Connect to VoidTerminal",
|
"接驳VoidTerminal": "Connect to VoidTerminal",
|
||||||
"**很好": "**Very good",
|
"**很好": "**Very good",
|
||||||
"对话|编程": "Conversation&ImageGenerating|Programming",
|
"对话|编程": "Conversation&ImageGenerating|Programming",
|
||||||
"对话|编程|学术": "Conversation|Programming|Academic",
|
"对话|编程|学术": "Conversation&ImageGenerating|Programming|Academic", "4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model",
|
||||||
"4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model",
|
|
||||||
"「请调用插件翻译PDF论文": "Please call the plugin to translate the PDF paper",
|
"「请调用插件翻译PDF论文": "Please call the plugin to translate the PDF paper",
|
||||||
"3. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词": "3. If you use keywords such as 'call plugin xxx', 'modify configuration xxx', 'please', etc.",
|
"3. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词": "3. If you use keywords such as 'call plugin xxx', 'modify configuration xxx', 'please', etc.",
|
||||||
"以下是一篇学术论文的基本信息": "The following is the basic information of an academic paper",
|
"以下是一篇学术论文的基本信息": "The following is the basic information of an academic paper",
|
||||||
@@ -3007,746 +3006,5 @@
|
|||||||
"GPT-Academic对话存档": "TranslatedText",
|
"GPT-Academic对话存档": "TranslatedText",
|
||||||
"Arxiv论文精细翻译": "TranslatedText",
|
"Arxiv论文精细翻译": "TranslatedText",
|
||||||
"from crazy_functions.AdvancedFunctionTemplate import 测试图表渲染": "from crazy_functions.AdvancedFunctionTemplate import test_chart_rendering",
|
"from crazy_functions.AdvancedFunctionTemplate import 测试图表渲染": "from crazy_functions.AdvancedFunctionTemplate import test_chart_rendering",
|
||||||
"测试图表渲染": "test_chart_rendering",
|
"测试图表渲染": "test_chart_rendering"
|
||||||
"请使用「LatexEnglishCorrection+高亮修正位置": "Please use 'LatexEnglishCorrection+highlight corrected positions",
|
|
||||||
"输出代码片段中!": "Output code snippet!",
|
|
||||||
"使用多种方式尝试切分文本": "Attempt to split the text in various ways",
|
|
||||||
"你是一个作家": "You are a writer",
|
|
||||||
"如果无法从中得到答案": "If unable to get an answer from it",
|
|
||||||
"无法读取以下数据": "Unable to read the following data",
|
|
||||||
"不允许直接报错": "Direct error reporting is not allowed",
|
|
||||||
"您也可以使用插件参数指定绘制的图表类型": "You can also specify the type of chart to be drawn using plugin parameters",
|
|
||||||
"不要包含太多情节": "Do not include too many plots",
|
|
||||||
"翻译为中文后重新编译为PDF": "Recompile into PDF after translating into Chinese",
|
|
||||||
"采样温度": "Sampling temperature",
|
|
||||||
"直接修改config.py": "Directly modify config.py",
|
|
||||||
"处理文件": "Handle file",
|
|
||||||
"判断返回是否正确": "Determine if the return is correct",
|
|
||||||
"gemini 不允许对话轮次为偶数": "Gemini does not allow the number of dialogue rounds to be even",
|
|
||||||
"8 象限提示图": "8-quadrant prompt diagram",
|
|
||||||
"基于上下文的prompt模版": "Context-based prompt template",
|
|
||||||
"^开始": "^Start",
|
|
||||||
"输出文本的最大tokens限制": "Maximum tokens limit for output text",
|
|
||||||
"在这个例子中": "In this example",
|
|
||||||
"以及处理PDF文件的示例代码": "And example code for handling PDF files",
|
|
||||||
"更新cookie": "Update cookie",
|
|
||||||
"获取公共缩进": "Get public indentation",
|
|
||||||
"请你给出围绕“{subject}”的序列图": "Please provide a sequence diagram around '{subject}'",
|
|
||||||
"请确保使用小写的模型名称": "Please ensure the use of lowercase model names",
|
|
||||||
"出现人物时": "When characters appear",
|
|
||||||
"azure模型对齐支持 -=-=-=-=-=-=-": "Azure model alignment support -=-=-=-=-=-=-",
|
|
||||||
"请一分钟后重试": "Please try again in one minute",
|
|
||||||
"解析GEMINI消息出错": "Error parsing GEMINI message",
|
|
||||||
"选择提示词": "Select prompt words",
|
|
||||||
"取值范围是": "The value range is",
|
|
||||||
"它会在": "It will be",
|
|
||||||
"加载文件": "Load file",
|
|
||||||
"是预定义按钮": "Is a predefined button",
|
|
||||||
"消息": "Message",
|
|
||||||
"默认搜索5条结果": "Default search for 5 results",
|
|
||||||
"第 2 部分": "Part 2",
|
|
||||||
"我们采样一个特殊的手段": "We sample a special method",
|
|
||||||
"后端开发": "Backend development",
|
|
||||||
"接下来提取md中的一级/二级标题作为摘要": "Next, extract the first/second-level headings in md as summaries",
|
|
||||||
"一个年轻人穿过天安门广场向纪念堂走去": "A young person walks through Tiananmen Square towards the Memorial Hall",
|
|
||||||
"将会使用这些摘要绘制图表": "Will use these summaries to draw charts",
|
|
||||||
"8-象限提示图": "8-quadrant prompt diagram",
|
|
||||||
"首先": "First",
|
|
||||||
"设计了此接口": "Designed this interface",
|
|
||||||
"本地模型": "Local model",
|
|
||||||
"所有图像仅在最后一个问题中提供": "All images are provided only in the last question",
|
|
||||||
"如连续3次判断失败将会使用流程图进行绘制": "If there are 3 consecutive failures, a flowchart will be used to draw",
|
|
||||||
"为了更灵活地接入one-api多模型管理界面": "To access the one-api multi-model management interface more flexibly",
|
|
||||||
"UI设计": "UI design",
|
|
||||||
"不允许在答案中添加编造成分": "Fabrication is not allowed in the answer",
|
|
||||||
"尽可能地": "As much as possible",
|
|
||||||
"先在前端快速清除chatbot&status": "First, quickly clear chatbot & status in the frontend",
|
|
||||||
"You exceeded your current quota. Cohere以账户额度不足为由": "You exceeded your current quota. Cohere due to insufficient account quota",
|
|
||||||
"合并所有的标题": "Merge all headings",
|
|
||||||
"跳过下载": "Skip download",
|
|
||||||
"中生产图表": "Production Chart",
|
|
||||||
"如输入区内容不是文件则直接返回输入区内容": "Return the content of the input area directly if it is not a file",
|
|
||||||
"用温度取样的另一种方法": "Another method of temperature sampling",
|
|
||||||
"不需要解释原因": "No need to explain the reason",
|
|
||||||
"一场延续了两万年的星际战争已接近尾声": "An interstellar war that has lasted for 20,000 years is drawing to a close",
|
|
||||||
"依次处理文件": "Process files in order",
|
|
||||||
"第一幕的字数少于300字": "The first act has fewer than 300 characters",
|
|
||||||
"已成功加载": "Successfully loaded",
|
|
||||||
"还是web渲染": "Web rendering",
|
|
||||||
"解析分辨率": "Resolution parsing",
|
|
||||||
"如果剩余文本的token数大于限制": "If the number of remaining text tokens exceeds the limit",
|
|
||||||
"你可以修改整个句子的顺序以确保翻译后的段落符合中文的语言习惯": "You can change the order of the whole sentence to ensure that the translated paragraph is in line with Chinese language habits",
|
|
||||||
"并同时充分考虑中文的语法、清晰、简洁和整体可读性": "And at the same time, fully consider Chinese grammar, clarity, conciseness, and overall readability",
|
|
||||||
"否则返回": "Otherwise return",
|
|
||||||
"一个特殊标记": "A special mark",
|
|
||||||
"4. 后续剧情发展4": "4. Plot development",
|
|
||||||
"恢复默认": "Restore default",
|
|
||||||
"转义点号": "Escape period",
|
|
||||||
"检查DASHSCOPE_API_KEY": "Check DASHSCOPE_API_KEY",
|
|
||||||
"阿里灵积云API_KEY": "Aliyun API_KEY",
|
|
||||||
"文件是否存在": "Check if the file exists",
|
|
||||||
"您的选择是": "Your choice is",
|
|
||||||
"处理用户对话": "Handle user dialogue",
|
|
||||||
"即": "That is",
|
|
||||||
"将会由对话模型首先判断适合的图表类型": "The dialogue model will first determine the appropriate chart type",
|
|
||||||
"以查看所有的配置信息": "To view all configuration information",
|
|
||||||
"用于初始化包的属性和导入模块": "For initializing package properties and importing modules",
|
|
||||||
"to_markdown_tabs 文件list 转换为 md tab": "to_markdown_tabs Convert file list to MD tab",
|
|
||||||
"更换模型": "Replace Model",
|
|
||||||
"从以下文本中提取摘要": "Extract Summary from the Following Text",
|
|
||||||
"表示捕获任意长度的文本": "Indicates Capturing Text of Arbitrary Length",
|
|
||||||
"可能是一个模块的初始化文件": "May Be an Initialization File for a Module",
|
|
||||||
"处理提问与输出": "Handle Questions and Outputs",
|
|
||||||
"需要的再做些简单调整即可": "Some Simple Adjustments Needed",
|
|
||||||
"所以这个没有用": "So This Is Not Useful",
|
|
||||||
"请配置 DASHSCOPE_API_KEY": "Please Configure DASHSCOPE_API_KEY",
|
|
||||||
"不是预定义按钮": "Not a Predefined Button",
|
|
||||||
"让读者能够感受到你的故事世界": "Let Readers Feel Your Story World",
|
|
||||||
"开始整理headers与message": "Start Organizing Headers and Messages",
|
|
||||||
"兼容最新的智谱Ai": "Compatible with the Latest ZhiPu AI",
|
|
||||||
"对于某些PDF会有第一个段落就以小写字母开头": "For Some PDFs, the First Paragraph May Start with a Lowercase Letter",
|
|
||||||
"问题是": "The Issue Is",
|
|
||||||
"也就是说它会匹配尽可能少的字符": "That Is, It Will Match the Least Amount of Characters Possible",
|
|
||||||
"未能成功加载": "Failed to Load Successfully",
|
|
||||||
"接入通义千问在线大模型 https": "Access TongYi QianWen Online Large Model HTTPS",
|
|
||||||
"用不太优雅的方式处理一个core_functional.py中出现的mermaid渲染特例": "Handle a Mermaid Rendering Special Case in core_functional.py in an Ugly Way",
|
|
||||||
"您也可以选择给出其他故事走向": "You Can Also Choose to Provide Alternative Storylines",
|
|
||||||
"改善非markdown输入的显示效果": "Improve Display Effects for Non-Markdown Input",
|
|
||||||
"在二十二世纪编年史中": "In the Chronicle of the 22nd Century",
|
|
||||||
"docs 为Document列表": "docs Are a List of Documents",
|
|
||||||
"互动写故事": "Interactive Story Writing",
|
|
||||||
"4 饼图": "Pie Chart",
|
|
||||||
"正在生成插图中": "Generating Illustration",
|
|
||||||
"路径不存在": "Path Does Not Exist",
|
|
||||||
"PDF翻译中文": "PDF Translation to Chinese",
|
|
||||||
"进行简短的环境描写": "Conduct a Brief Environmental Description",
|
|
||||||
"学术英中互译": "Academic English-Chinese Translation",
|
|
||||||
"且少于2个段落": "And less than 2 paragraphs",
|
|
||||||
"html_view_blank 超链接": "HTML View Blank Hyperlink",
|
|
||||||
"处理 history": "Handle History",
|
|
||||||
"非Cohere官方接口返回了错误": "Non-Cohere Official Interface Returned an Error",
|
|
||||||
"缺失 MATHPIX_APPID 和 MATHPIX_APPKEY": "Missing MATHPIX_APPID and MATHPIX_APPKEY",
|
|
||||||
"搜索知识库内容条数": "Search Knowledge Base Content Count",
|
|
||||||
"返回数据": "Return Data",
|
|
||||||
"没有相关文件": "No Relevant Files",
|
|
||||||
"知识库路径": "Knowledge Base Path",
|
|
||||||
"质量与风格默认值": "Quality and Style Defaults",
|
|
||||||
"包含了用于文本切分的函数": "Contains Functions for Text Segmentation",
|
|
||||||
"请你给出围绕“{subject}”的逻辑关系图": "Please Provide a Logic Diagram Surrounding '{subject}'",
|
|
||||||
"官方Pro服务器🧪": "Official Pro Server",
|
|
||||||
"不支持同时处理多个pdf文件": "Does Not Support Processing Multiple PDF Files Simultaneously",
|
|
||||||
"查询5天历史事件": "Query 5-Day Historical Events",
|
|
||||||
"你是经验丰富的翻译": "You Are an Experienced Translator",
|
|
||||||
"html输入": "HTML Input",
|
|
||||||
"输入文件不存在": "Input File Does Not Exist",
|
|
||||||
"很多人生来就会莫名其妙地迷上一样东西": "Many People Are Born with an Unexplained Attraction to Something",
|
|
||||||
"默认值为 0.7": "Default Value is 0.7",
|
|
||||||
"值越大": "The Larger the Value",
|
|
||||||
"以下文件未能成功加载": "The Following Files Failed to Load",
|
|
||||||
"在线模型": "Online Model",
|
|
||||||
"切割输入": "Cut Input",
|
|
||||||
"修改docker-compose.yml等价于修改容器内部的环境变量": "Modifying docker-compose.yml is Equivalent to Modifying the Internal Environment Variables of the Container",
|
|
||||||
"以换行符分割": "Split by Line Break",
|
|
||||||
"修复中文乱码的问题": "Fix Chinese Character Encoding Issues",
|
|
||||||
"zhipuai 是glm-4的别名": "zhipuai is an alias for glm-4",
|
|
||||||
"保证其在允许范围内": "Ensure it is within the permissible range",
|
|
||||||
"段尾如果有多余的\\n就去掉它": "Remove any extra \\n at the end of the paragraph",
|
|
||||||
"是否流式输出": "Whether to stream output",
|
|
||||||
"1-流程图": "1-Flowchart",
|
|
||||||
"学术语料润色": "Academic text polishing",
|
|
||||||
"已经超过了模型的最大上下文或是模型格式错误": "Has exceeded the model's maximum context or there is a model format error",
|
|
||||||
"英文省略号": "English ellipsis",
|
|
||||||
"登录成功": "Login successful",
|
|
||||||
"随便切一下吧": "Just cut it randomly",
|
|
||||||
"PDF转换为tex项目失败": "PDF conversion to TeX project failed",
|
|
||||||
"的 max_token 配置不是整数": "The max_token configuration is not an integer",
|
|
||||||
"根据当前聊天历史或指定的路径文件": "According to the current chat history or specified path file",
|
|
||||||
"你必须利用以下文档中包含的信息回答这个问题": "You must use the information contained in the following document to answer this question",
|
|
||||||
"对话、日志记录": "Dialogue, logging",
|
|
||||||
"内容至知识库": "Content to knowledge base",
|
|
||||||
"在银河系的中心": "At the center of the Milky Way",
|
|
||||||
"检查PDF是否被重复上传": "Check if the PDF has been uploaded multiple times",
|
|
||||||
"取最后 max_prompt_tokens 个 token 输入模型": "Take the last max_prompt_tokens tokens as input to the model",
|
|
||||||
"请输入图类型对应的数字": "Please enter the corresponding number for the graph type",
|
|
||||||
"插件主程序3 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=": "Plugin main program 3 -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=",
|
|
||||||
"正在tex项目将翻译为中文": "The TeX project is being translated into Chinese",
|
|
||||||
"适配润色区域": "Adapter polishing area",
|
|
||||||
"首先你从历史记录中提取摘要": "First, you extract an abstract from the history",
|
|
||||||
"讯飞星火认知大模型 -=-=-=-=-=-=-": "iFLYTEK Spark Cognitive Model -=-=-=-=-=-=-=-=-=-",
|
|
||||||
"包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类": "Contains functions and classes for building and managing vector databases",
|
|
||||||
"另外": "Additionally",
|
|
||||||
"内部调优参数": "Internal tuning parameters",
|
|
||||||
"输出格式例如": "Example of Output Format",
|
|
||||||
"当回复图像时": "When Responding with an Image",
|
|
||||||
"越权访问!": "Unauthorized Access!",
|
|
||||||
"如果给出的 prompt 的 token 长度超过此限制": "If the Given Prompt's Token Length Exceeds This Limit",
|
|
||||||
"因此你每次写的故事段落应少于300字": "Therefore, Each Story Paragraph You Write Should Be Less Than 300 Words",
|
|
||||||
"尽量短": "As Concise as Possible",
|
|
||||||
"中文提示词就不显示了": "Chinese Keywords Will Not Be Displayed",
|
|
||||||
"请在前文的基础上": "Please Based on the Previous Text",
|
|
||||||
"20张": "20 Sheets",
|
|
||||||
"文件内容优先": "File Content Takes Priority",
|
|
||||||
"状态图": "State Diagram",
|
|
||||||
"开始查找合适切分点的偏移": "Start Looking for the Offset of an Appropriate Split Point",
|
|
||||||
"已知信息": "Known Information",
|
|
||||||
"文心一言大模型": "Wenxin Yanyan Large Model",
|
|
||||||
"传递进来一些奇怪的东西": "Passing in Some Weird Things",
|
|
||||||
"很多规则中会考虑分号": "Many Rules Consider the Semicolon",
|
|
||||||
"请配置YUNQUE_SECRET_KEY": "Please Configure YUNQUE_SECRET_KEY",
|
|
||||||
"6-状态图": "6-State Diagram",
|
|
||||||
"输出文本的最小tokens限制": "Minimum Tokens Limit for Output Text",
|
|
||||||
"服务节点": "Service Node",
|
|
||||||
"云雀大模型": "Lark Large Model",
|
|
||||||
"请配置 GEMINI_API_KEY": "Please Configure GEMINI_API_KEY",
|
|
||||||
"可以让软件运行在 http": "Can Run the Software Over HTTP",
|
|
||||||
"基于当前对话或文件GenerateMultipleMermaidCharts": "Generate Multiple Mermaid Charts Based on the Current Conversation or File",
|
|
||||||
"剧情收尾": "Plot Conclusion",
|
|
||||||
"请开始提问": "Please Begin Your Question",
|
|
||||||
"第一页内容/摘要": "First Page Content/Summary",
|
|
||||||
"无法判断则返回image/jpeg": "Return image/jpeg If Unable to Determine",
|
|
||||||
"仅需要输出单个不带任何标点符号的数字": "Single digit without any punctuation",
|
|
||||||
"以下是每类图表的PROMPT": "Here are the PROMPTS for each type of chart",
|
|
||||||
"状态码": "Status code",
|
|
||||||
"TopP值越大输出的tokens类型越丰富": "The larger the TopP value, the richer the types of output tokens",
|
|
||||||
"files_filter_handler 根据type过滤文件": "files_filter_handler filters files by type",
|
|
||||||
"比较每一页的内容是否相同": "Compare whether each page's content is the same",
|
|
||||||
"前往": "Go to",
|
|
||||||
"请输入剧情走向": "Please enter the plot direction",
|
|
||||||
"故事收尾": "Story ending",
|
|
||||||
"必须说明正在回复哪张图像": "Must specify which image is being replied to",
|
|
||||||
"历史文件继续上传": "Continue uploading historical files",
|
|
||||||
"因此禁用": "Therefore disabled",
|
|
||||||
"使用lru缓存": "Use LRU caching",
|
|
||||||
"该装饰器是大多数功能调用的入口": "This decorator is the entry point for most function calls",
|
|
||||||
"如果需要开启": "If needed to enable",
|
|
||||||
"使用 json 解析库进行处理": "Process using JSON parsing library",
|
|
||||||
"将PDF转换为Latex项目": "Convert PDF to LaTeX project",
|
|
||||||
"7-实体关系图": "7-Entity relationship diagram",
|
|
||||||
"根据用户的提示": "According to the user's prompt",
|
|
||||||
"当前用户的请求信息": "Current user's request information",
|
|
||||||
"配置关联关系说明": "Configuration relationship description",
|
|
||||||
"这段代码是使用Python编程语言中的re模块": "This code uses the re module in the Python programming language",
|
|
||||||
"link_mtime_to_md 文件增加本地时间参数": "link_mtime_to_md adds local time parameter to the file",
|
|
||||||
"从当前对话或路径": "From the current conversation or path",
|
|
||||||
"一起写故事": "Write a story together",
|
|
||||||
"前端开发": "Front-end development",
|
|
||||||
"开区间": "Open interval",
|
|
||||||
"如插件参数不正确则使用对话模型判断": "If the plugin parameters are incorrect, use the dialogue model for judgment",
|
|
||||||
"对字符串进行处理": "Process the string",
|
|
||||||
"简洁和专业的来回答用户的问题": "Answer user questions concisely and professionally",
|
|
||||||
"如输入区不是文件则将输入区内容加入历史记录": "If the input area is not a file, add the content of the input area to the history",
|
|
||||||
"编写一个小说的第一幕": "Write the first act of a novel",
|
|
||||||
"更具创造性;": "More creative;",
|
|
||||||
"用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数": "Functions and related auxiliary functions for parsing and translating PDF files",
|
|
||||||
"月之暗面 -=-=-=-=-=-=-": "The Dark Side of the Moon -=-=-=-=-=-=-",
|
|
||||||
"2. 后续剧情发展2": "2. Subsequent plot development 2",
|
|
||||||
"请先提供文本的更正版本": "Please provide the corrected version of the text first",
|
|
||||||
"修改环境变量": "Modify environment variables",
|
|
||||||
"读取之前的自定义按钮": "Read previous custom buttons",
|
|
||||||
"如果为0": "If it is 0",
|
|
||||||
"函数用于去除多行字符串的缩进": "Function to remove indentation from multiline strings",
|
|
||||||
"请绘制有关“": "Please draw something about \"",
|
|
||||||
"给出4种不同的后续剧情发展方向": "Provide 4 different directions for subsequent plot development",
|
|
||||||
"新调优版本GPT-4🔥": "Newly tuned version GPT-4🔥",
|
|
||||||
"已弃用": "Deprecated",
|
|
||||||
"参考 https": "Refer to https",
|
|
||||||
"发现重复上传": "Duplicate upload detected",
|
|
||||||
"本项目的所有配置都集中在config.py中": "All configurations for this project are centralized in config.py",
|
|
||||||
"默认值为 0.95": "Default value is 0.95",
|
|
||||||
"请查阅": "Please refer to",
|
|
||||||
"此选项已废弃": "This option is deprecated",
|
|
||||||
"找到了.doc文件": ".doc file found",
|
|
||||||
"他们的目的地是南极": "Their destination is Antarctica",
|
|
||||||
"lang_reference这段文字是": "The lang_reference text is",
|
|
||||||
"正在尝试生成对比PDF": "Attempting to generate a comparative PDF",
|
|
||||||
"input_encode_handler 提取input中的文件": "input_encode_handler Extracts files from input",
|
|
||||||
"使用中文": "Use Chinese",
|
|
||||||
"一些垃圾第三方接口会出现这样的错误": "Some crappy third-party interfaces may produce such errors",
|
|
||||||
"例如将空格转换为 ": "For example, converting spaces to  ",
|
|
||||||
"请你给出围绕“{subject}”的类图": "Please provide a class diagram around '{subject}'",
|
|
||||||
"是插件的内部参数": "Is an internal parameter of the plugin",
|
|
||||||
"网络波动时可选其他": "Alternative options when network fluctuates",
|
|
||||||
"非Cohere官方接口的出现这样的报错": "Such errors occur in non-Cohere official interfaces",
|
|
||||||
"是前缀": "Is a prefix",
|
|
||||||
"默认 None": "Default None",
|
|
||||||
"如果几天后能顺利到达那里": "If we can smoothly arrive there in a few days",
|
|
||||||
"输出1": "Output 1",
|
|
||||||
"3-类图": "3-Class Diagram",
|
|
||||||
"如需绘制思维导图请使用参数调用": "Please use parameters to call if you need to draw a mind map",
|
|
||||||
"正在将PDF转换为tex项目": "Converting PDF to TeX project",
|
|
||||||
"列出10个经典名著": "List 10 classic masterpieces",
|
|
||||||
"? 在这里用作非贪婪匹配": "? Used here as a non-greedy match",
|
|
||||||
"左上角更换模型菜单中可切换openai": "Switch to OpenAI in the model change menu in the top left corner",
|
|
||||||
"原样返回": "Return as is",
|
|
||||||
"请配置 MATHPIX_APPID 和 MATHPIX_APPKEY": "Please configure MATHPIX_APPID and MATHPIX_APPKEY",
|
|
||||||
"概括上述段落的内容以及内在逻辑关系": "Summarize the content of the above paragraph and its inherent logical relationship",
|
|
||||||
"cookie相关工具函数": "Cookie-related utility functions",
|
|
||||||
"请你给出围绕“{subject}”的饼图": "Please provide a pie chart around '{subject}'",
|
|
||||||
"原型设计": "Prototype design",
|
|
||||||
"必须为正数": "Must be a positive number",
|
|
||||||
"又一阵剧痛从肝部袭来": "Another wave of severe pain strikes from the liver",
|
|
||||||
"智谱AI": "Zhipu AI",
|
|
||||||
"基础功能区按钮的附加功能": "Additional functions of the basic functional area buttons",
|
|
||||||
"one-api 对齐支持 -=-=-=-=-=-=-": "one-api alignment support -=-=-=-=-=-=-",
|
|
||||||
"5 甘特图": "5 Gantt chart",
|
|
||||||
"用于初始化包的属性和导入模块是一个包的初始化文件": "The file used for initializing package properties and importing modules is an initialization file for the package",
|
|
||||||
"创建并修改config_private.py": "Create and modify config_private.py",
|
|
||||||
"会使输出更随机": "Would make the output more random",
|
|
||||||
"已添加": "Added",
|
|
||||||
"估计一个切分点": "Estimate a split point",
|
|
||||||
"\\n\\n1. 临时解决方案": "\\n\\n1. Temporary solution",
|
|
||||||
"没有回答": "No answer",
|
|
||||||
"尝试重新翻译PDF": "Try to retranslate the PDF",
|
|
||||||
"被这个解码给耍了": "Fooled by this decoding",
|
|
||||||
"再在后端清除history": "Clear history on the backend again",
|
|
||||||
"根据情况选择flowchart LR": "Choose flowchart LR based on the situation",
|
|
||||||
"幻方-深度求索大模型 -=-=-=-=-=-=-": "Deep Seek Large Model -=-=-=-=-=-=-",
|
|
||||||
"即使它们在历史记录中被提及": "Even if they are mentioned in the history",
|
|
||||||
"此处需要进一步优化逻辑": "Further logic optimization is needed here",
|
|
||||||
"借鉴自同目录下的bridge_ChatGPT.py": "Derived from the bridge_ChatGPT.py in the same directory",
|
|
||||||
"正是这样": "That's exactly right",
|
|
||||||
"您也可以给出您心中的其他故事走向": "You can also provide other story directions in your mind",
|
|
||||||
"文本预处理": "Text preprocessing",
|
|
||||||
"请登录": "Please log in",
|
|
||||||
"请修改docker-compose": "Please modify docker-compose",
|
|
||||||
"运行一些异步任务": "Run some asynchronous tasks",
|
|
||||||
"5-甘特图": "5-Gantt chart",
|
|
||||||
"3 类图": "3-Class diagram",
|
|
||||||
"因为你接下来将会与用户互动续写下面的情节": "Because you will interact with the user to continue writing the plot below",
|
|
||||||
"避免把同一个文件添加多次": "Avoid adding the same file multiple times",
|
|
||||||
"可挑选精度": "Selectable precision",
|
|
||||||
"调皮一下": "Play a joke",
|
|
||||||
"并解析": "And parse",
|
|
||||||
"您可以在输入框中输入一些关键词": "You can enter some keywords in the input box",
|
|
||||||
"文件加载失败": "File loading failed",
|
|
||||||
"请你给出围绕“{subject}”的甘特图": "Please provide a Gantt chart around \"{subject}\"",
|
|
||||||
"上传PDF": "Upload PDF",
|
|
||||||
"请判断适合使用的流程图类型": "Please determine the suitable flowchart type",
|
|
||||||
"错误码": "Error code",
|
|
||||||
"非markdown输入": "Non-markdown input",
|
|
||||||
"所以只能通过提示词对第几张图片进行定位": "So can only locate the image by the prompt",
|
|
||||||
"避免下载到缓存文件": "Avoid downloading cached files",
|
|
||||||
"没有思维导图!!!测试发现模型始终会优先选择思维导图": "No mind map!!! Testing found that the model always prioritizes mind maps",
|
|
||||||
"请登录Cohere查看详情 https": "Please log in to Cohere for details https",
|
|
||||||
"检查历史上传的文件是否与新上传的文件相同": "Check if the previously uploaded file is the same as the newly uploaded file",
|
|
||||||
"加载主题相关的工具函数": "Load theme-related utility functions",
|
|
||||||
"图表类型由模型判断": "Chart type is determined by the model",
|
|
||||||
"⭐ 多线程方法": "Multi-threading method",
|
|
||||||
"获取 max_token 的值": "Get the value of max_token",
|
|
||||||
"空白的输入栏": "Blank input field",
|
|
||||||
"根据整理的摘要选择图表类型": "Select chart type based on the organized summary",
|
|
||||||
"返回 True": "Return True",
|
|
||||||
"这里为了区分中英文情景搞复杂了一点": "Here it's a bit complicated to distinguish between Chinese and English contexts",
|
|
||||||
"ZHIPUAI_MODEL 配置项选项已经弃用": "ZHIPUAI_MODEL configuration option is deprecated",
|
|
||||||
"但是这里我把它忽略不计": "But here I ignore it",
|
|
||||||
"非必要": "Not necessary",
|
|
||||||
"思维导图": "Mind map",
|
|
||||||
"插件」": "Plugin",
|
|
||||||
"重复文件路径": "Duplicate file path",
|
|
||||||
"之间不要存在空格": "No spaces between fields",
|
|
||||||
"破折号、英文双引号等同样忽略": "Ignore dashes, English quotes, etc.",
|
|
||||||
"填写 VOLC_ACCESSKEY": "Enter VOLC_ACCESSKEY",
|
|
||||||
"称为核取样": "Called nuclear sampling",
|
|
||||||
"Incorrect API key. 请确保API key有效": "Incorrect API key. Please ensure the API key is valid",
|
|
||||||
"如输入区内容为文件则清空历史记录": "If the input area content is a file, clear the history",
|
|
||||||
"并处理精度问题": "And handle precision issues",
|
|
||||||
"并给出修改的理由": "And provide reasons for the changes",
|
|
||||||
"至此已经超出了正常接口应该进入的范围": "This has exceeded the scope that a normal interface should enter",
|
|
||||||
"并已加载知识库": "And the knowledge base has been loaded",
|
|
||||||
"file_manifest_filter_html 根据type过滤文件": "file_manifest_filter_html filters files by type",
|
|
||||||
"participant B as 系统": "participant B as System",
|
|
||||||
"要留出足够的互动空间": "Leave enough interaction space",
|
|
||||||
"请你给出围绕“{subject}”的实体关系图": "Please provide an entity relationship diagram around '{subject}'",
|
|
||||||
"答案请使用中文": "Please answer in Chinese",
|
|
||||||
"输出会更加稳定或确定": "The output will be more stable or certain",
|
|
||||||
"是一个包的初始化文件": "Is an initialization file for a package",
|
|
||||||
"用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器": "A universal file loader for loading and splitting text in files",
|
|
||||||
"围绕我选定的剧情情节": "Around the plot I have chosen",
|
|
||||||
"Mathpix 拥有执行PDF的OCR功能": "Mathpix has OCR functionality for PDFs",
|
|
||||||
"是否允许暴力切分": "Whether to allow violent segmentation",
|
|
||||||
"清空 txt_tmp 对应的位置方便下次搜索": "Clear the location corresponding to txt_tmp for easier next search",
|
|
||||||
"编写小说的最后一幕": "Write the last scene of the novel",
|
|
||||||
"可能是一个模块的初始化文件根据位置和名称": "May be an initialization file for a module based on position and name",
|
|
||||||
"更新新的自定义按钮": "Update new custom button",
|
|
||||||
"把分句符\\n放到双引号后": "Put the sentence separator \\n after the double quotes",
|
|
||||||
"序列图": "Sequence diagram",
|
|
||||||
"兼容非markdown输入": "Compatible with non-markdown input",
|
|
||||||
"那么就切": "Then cut",
|
|
||||||
"4-饼图": "4-Pie chart",
|
|
||||||
"结束剧情": "End of the plot",
|
|
||||||
"字数要求": "Word count requirement",
|
|
||||||
"以下是对以上文本的总结": "Below is a summary of the above text",
|
|
||||||
"但不要同时调整两个参数": "But do not adjust two parameters at the same time",
|
|
||||||
"📌省略": "Omit",
|
|
||||||
"请查看message": "Please check the message",
|
|
||||||
"如果所有页的内容都相同": "If all pages have the same content",
|
|
||||||
"我将在这4个选择中": "I will choose from these 4 options",
|
|
||||||
"请设置为True": "Please set to True",
|
|
||||||
"当 remain_txt_to_cut": "When remain_txt_to_cut",
|
|
||||||
"后续输出被截断": "Subsequent output is truncated",
|
|
||||||
"检查API_KEY": "Check API_KEY",
|
|
||||||
"阿里云实时语音识别 配置难度较高": "Alibaba Cloud real-time speech recognition has a higher configuration difficulty",
|
|
||||||
"图像生成提示为空白": "Image generation prompt is blank",
|
|
||||||
"由于实体关系图用到了{}符号": "Because the entity relationship diagram uses the {} symbol",
|
|
||||||
"系统繁忙": "System busy",
|
|
||||||
"月之暗面 API KEY": "Moonshot AI API KEY",
|
|
||||||
"编写小说的下一幕": "Write the next scene of the novel",
|
|
||||||
"选择一种": "Choose one",
|
|
||||||
"或者flowchart TD": "Or flowchart TD",
|
|
||||||
"请把以下学术文章段落翻译成中文": "Please translate the following academic article paragraph into Chinese",
|
|
||||||
"7 实体关系图": "7 Entity relationship diagram",
|
|
||||||
"处理游戏的主体逻辑": "Handle the main logic of the game",
|
|
||||||
"请以“{headstart}”为开头": "Please start with \"{headstart}\"",
|
|
||||||
"匹配后单段上下文长度": "Length of single segment context after matching",
|
|
||||||
"先行者知道": "The pioneer knows",
|
|
||||||
"以及处理PDF文件的示例代码包含了用于文本切分的函数": "Example code for processing PDF files includes functions for text segmentation",
|
|
||||||
"未发现重复上传": "No duplicate uploads found",
|
|
||||||
"那么就不用切了": "Then there's no need to split",
|
|
||||||
"目前来说": "Currently",
|
|
||||||
"请在LLM_MODEL中配置": "Please configure in LLM_MODEL",
|
|
||||||
"是否启用上下文关联": "Whether to enable context association",
|
|
||||||
"为了加速计算": "To speed up calculations",
|
|
||||||
"登录请求": "Login request",
|
|
||||||
"这里解释一下正则表达式中的几个特殊字符": "Explanation of some special characters in regular expressions",
|
|
||||||
"其中数字对应关系为": "The corresponding relationship of the numbers is",
|
|
||||||
"修改配置有三种方法": "There are three ways to modify the configuration",
|
|
||||||
"请前往arxiv打开此论文下载页面": "Please go to arXiv and open the paper download page",
|
|
||||||
"然后download source手动下载latex源码包": "Then manually download the LaTeX source package by downloading the source",
|
|
||||||
"功能单元": "Functional unit",
|
|
||||||
"你需要翻译的文本如下": "The text you need to translate is as follows",
|
|
||||||
"以便于后续快速的匹配和查找操作": "To facilitate rapid matching and search operations later",
|
|
||||||
"文本内容": "Text content",
|
|
||||||
"自动更新、打开浏览器页面、预热tiktoken模块": "Auto-update, open browser page, warm up tiktoken module",
|
|
||||||
"原样传递": "Pass through as is",
|
|
||||||
"但是该文件格式不被支持": "But the file format is not supported",
|
|
||||||
"他现在是全宇宙中唯一的一个人了": "He is now the only person in the entire universe",
|
|
||||||
"取值范围0~1": "Value range 0~1",
|
|
||||||
"搜索匹配score阈值": "Search match score threshold",
|
|
||||||
"当字符串中有掩码tag时": "When there is a mask tag in the string",
|
|
||||||
"错误的不纳入对话": "Errors are not included in the conversation",
|
|
||||||
"英语": "English",
|
|
||||||
"象限提示图": "Quadrant prompt diagram",
|
|
||||||
"由于不管提供文本是什么": "Because regardless of what the provided text is",
|
|
||||||
"确定后续剧情的发展": "Determine the development of the subsequent plot",
|
|
||||||
"处理空输入导致报错的问题 https": "Handle the error caused by empty input",
|
|
||||||
"第 3 部分": "Part 3",
|
|
||||||
"不能等于 0 或 1": "Cannot be equal to 0 or 1",
|
|
||||||
"同时过大的图表可能需要复制到在线编辑器中进行渲染": "Large charts may need to be copied to an online editor for rendering",
|
|
||||||
"装饰器函数ArgsGeneralWrapper": "Decorator function ArgsGeneralWrapper",
|
|
||||||
"写个函数移除所有的换行符": "Write a function to remove all line breaks",
|
|
||||||
"默认为False": "Default is False",
|
|
||||||
"实例化BaiduSpider": "Instantiate BaiduSpider",
|
|
||||||
"9-思维导图": "9-Mind map",
|
|
||||||
"是否开启跨域": "Whether to enable cross-domain",
|
|
||||||
"随机InteractiveMiniGame": "Random InteractiveMiniGame",
|
|
||||||
"用于构建HTML报告的类和方法用于构建HTML报告的类和方法用于构建HTML报告的类和方法": "Classes and methods for building HTML reports",
|
|
||||||
"这里填一个提示词字符串就行了": "Just fill in a prompt string here",
|
|
||||||
"文本切分": "Text segmentation",
|
|
||||||
"用于在生成mermaid图表时隐藏代码块": "Used to hide code blocks when generating mermaid charts",
|
|
||||||
"如果剩余文本的token数小于限制": "If the number of tokens in the remaining text is less than the limit",
|
|
||||||
"未能在规定时间内完成任务": "Failed to complete the task within the specified time",
|
|
||||||
"API key has been deactivated. Cohere以账户失效为由": "API key has been deactivated. Cohere cited account expiration as the reason",
|
|
||||||
"正在使用讯飞图片理解API": "Using the Xunfei Image Understanding API",
|
|
||||||
"如果您使用docker-compose部署": "If you deploy using docker-compose",
|
|
||||||
"最大输入 token 数": "Maximum input token count",
|
|
||||||
"遇到了控制请求速率限制": "Encountered control request rate limit",
|
|
||||||
"数值范围约为0-1100": "The numerical range is approximately 0-1100",
|
|
||||||
"几乎使他晕厥过去": "Almost made him faint",
|
|
||||||
"识图模型GPT-4V": "Image recognition model GPT-4V",
|
|
||||||
"零一万物模型 -=-=-=-=-=-=-": "01.AI (Yi) Models -=-=-=-=-=-=-",
|
|
||||||
"所有对话记录将自动保存在本地目录": "All conversation records will be saved automatically in the local directory",
|
|
||||||
"饼图": "Pie Chart",
|
|
||||||
"添加Live2D": "Add Live2D",
|
|
||||||
"⭐ 单线程方法": "Single-threaded Method",
|
|
||||||
"配图": "Illustration",
|
|
||||||
"根据上述已知信息": "Based on the Above Known Information",
|
|
||||||
"1. 后续剧情发展1": "1. Subsequent Plot Development 1",
|
|
||||||
"2-序列图": "2-Sequence diagram",
|
|
||||||
"流程图": "Flowchart",
|
|
||||||
"需求分析": "Requirement Analysis",
|
|
||||||
"我认为更合理的是": "I Think a More Reasonable Approach Is",
|
|
||||||
"claude家族": "Claude Family",
|
|
||||||
"”的逻辑关系图": "Logic Relationship Diagram",
|
|
||||||
"给出人物的名字": "Provide the Names of Characters",
|
|
||||||
"无法自动下载该论文的Latex源码": "Unable to Automatically Download the LaTeX Source Code of the Paper",
|
|
||||||
"需要用户手动处理的信息": "Information That Requires Manual Processing by Users",
|
|
||||||
"点击展开“文件下载区”": "Click to Expand 'File Download Area'",
|
|
||||||
"生成长度过长": "Excessive Length Generated",
|
|
||||||
"\\n\\n2. 长效解决方案": "\\n\\n2. Long-term solution",
|
|
||||||
"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=": "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Plugin Main Program 2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=",
|
|
||||||
"title 项目开发流程": "title Project Development Process",
|
|
||||||
"如果您希望剧情立即收尾": "If You Want the Plot to End Immediately",
|
|
||||||
"空格转换为 ": "Space Converted to ",
|
|
||||||
"图片数量超过api上限": "Number of Images Exceeds API Limit",
|
|
||||||
"他知道": "He Knows",
|
|
||||||
"在这里输入自定义参数「分辨率-质量": "Enter Custom Parameters Here 'Resolution-Quality",
|
|
||||||
"例如ChatGLM&gpt-3.5-turbo&gpt-4": "For example ChatGLM&gpt-3.5-turbo&gpt-4",
|
|
||||||
"账户管理": "Account Management",
|
|
||||||
"正在将翻译好的项目tex项目编译为PDF": "Compiling the Translated Project .tex Project into PDF",
|
|
||||||
"我们把 _max 后的文字转存至 remain_txt_to_cut_storage": "We save the text after _max to the remain_txt_to_cut_storage",
|
|
||||||
"标签之前停止匹配": "Stop matching before the label",
|
|
||||||
"例子": "Example",
|
|
||||||
"遍历检查是否有额外参数": "Iterate to check for extra parameters",
|
|
||||||
"文本分句长度": "Length of text segmentation",
|
|
||||||
"请你给出围绕“{subject}”的状态图": "Please provide a state diagram surrounding \"{subject}\"",
|
|
||||||
"用stream的方法避免中途网线被掐": "Use the stream method to avoid the cable being disconnected midway",
|
|
||||||
"然后在markdown表格中列出修改的内容": "Then list the changes in a Markdown table",
|
|
||||||
"以上是从文章中提取的摘要": "The above is an abstract extracted from the article",
|
|
||||||
"但是无法找到相关文件": "But unable to find the relevant file",
|
|
||||||
"上海AI-LAB书生大模型 -=-=-=-=-=-=-": "Shanghai AI Lab InternLM (Shusheng) Models -=-=-=-=-=-=-",
|
|
||||||
"遇到第一个": "Meet the first",
|
|
||||||
"存储在名为const_extract_exp的变量中": "Stored in a variable named const_extract_exp",
|
|
||||||
"括号在正则表达式中表示捕获组": "Parentheses represent capture groups in regular expressions",
|
|
||||||
"那里的太空中渐渐隐现出一个方形区域": "A square area gradually appears in the space there",
|
|
||||||
"智谱GLM4超级模型🔥": "Zhipu GLM4 Super Model🔥",
|
|
||||||
"故事开头": "Beginning of the story",
|
|
||||||
"请检查文件格式是否正确": "Please check if the file format is correct",
|
|
||||||
"这个模式被编译成一个正则表达式对象": "This pattern is compiled into a regular expression object",
|
|
||||||
"单字符断句符": "Single character sentence break",
|
|
||||||
"看后续支持吧": "Let's see the follow-up support",
|
|
||||||
"markdown输入": "Markdown input",
|
|
||||||
"系统": "System",
|
|
||||||
"80字以内": "Within 80 characters",
|
|
||||||
"一个测试mermaid绘制图表的功能": "A function to test the Mermaid chart drawing",
|
|
||||||
"输入部分": "Input section",
|
|
||||||
"移除右侧逗号": "Remove the comma on the right",
|
|
||||||
"因此思维导图仅能通过参数调用": "Therefore, the mind map can only be invoked through parameters",
|
|
||||||
"6 状态图": "6 State diagram",
|
|
||||||
"类图": "Class Diagram",
|
|
||||||
"不要重复前文": "Do not repeat the previous text",
|
|
||||||
"但内部": "But internally",
|
|
||||||
"小说的下一幕字数少于300字": "The next scene of the novel has fewer than 300 words",
|
|
||||||
"每个发展方向都精明扼要地用一句话说明": "Each development direction is concisely described in one sentence",
|
|
||||||
"充分考虑其之间的逻辑": "Fully consider the logic between them",
|
|
||||||
"兼顾前端状态的功能": "Take into account the functionality of the frontend state",
|
|
||||||
"1 流程图": "1 Flowchart",
|
|
||||||
"用户QQ群925365219": "User QQ Group 925365219",
|
|
||||||
"通义-本地模型 -=-=-=-=-=-=-": "Tongyi - Local Models -=-=-=-=-=-=-",
|
|
||||||
"取值范围0-1000": "Value range 0-1000",
|
|
||||||
"但不是^*.开始": "But not starting with ^*.",
|
|
||||||
"他们将钻出地壳去看诗云": "They will emerge from the crust to see the poetry cloud",
|
|
||||||
"我们正在互相讨论": "We are discussing with each other",
|
|
||||||
"值越小": "The smaller the value",
|
|
||||||
"请在以下几种故事走向中": "Please choose from the following story directions",
|
|
||||||
"请先把模型切换至gpt-*": "Please switch the model to gpt-* first",
|
|
||||||
"不再需要填写": "No longer needs to be filled out",
|
|
||||||
"深夜": "Late at night",
|
|
||||||
"小说的前文回顾": "Review of the previous text of the novel",
|
|
||||||
"项目文件树": "Project file tree",
|
|
||||||
"如果双引号前有终止符": "If there is a terminator before the double quotes",
|
|
||||||
"participant A as 用户": "Participant A as User",
|
|
||||||
"处理游戏初始化等特殊情况": "Handle special cases like game initialization",
|
|
||||||
"然后使用mermaid+llm绘制图表": "Then use mermaid+llm to draw charts",
|
|
||||||
"0表示不生效": "0 means not effective",
|
|
||||||
"在以下的剧情发展中": "In the following plot development",
|
|
||||||
"模型考虑具有 top_p 概率质量 tokens 的结果": "The model considers results from the tokens within the top_p probability mass",
|
|
||||||
"根据字符串要给谁看": "Depending on who is intended to view the string",
|
|
||||||
"没有设置YIMODEL_API_KEY选项": "YIMODEL_API_KEY option is not set",
|
|
||||||
"换行符转换为": "Convert line breaks to",
|
|
||||||
"-风格": "-style",
|
|
||||||
"默认情况下并发量极低": "Concurrency is extremely low by default",
|
|
||||||
"为字符串加上上面定义的前缀和后缀": "Add the defined prefix and suffix to the string",
|
|
||||||
"先切换模型到gpt-*": "Switch the model to gpt-* first",
|
|
||||||
"它确保我们匹配的任意文本是尽可能短的": "It ensures that any text we match is as short as possible",
|
|
||||||
"积极地运用环境描写、人物描写等手法": "Actively use techniques such as environmental and character descriptions",
|
|
||||||
"零一万物": "01.AI (Zero One)",
|
|
||||||
"html_local_file 本地文件取相对路径": "html_local_file takes the relative path of the local file",
|
|
||||||
"伊依一行三人乘坐一艘游艇在南太平洋上做吟诗航行": "Yi Yi's party of three sailed a yacht across the South Pacific on a poetry-reciting voyage",
|
|
||||||
"移除左边通配符": "Remove left wildcard characters",
|
|
||||||
"随后绘制图表": "Draw a chart subsequently",
|
|
||||||
"输入2": "Input 2",
|
|
||||||
"所以用最没有意义的一个点代替": "Therefore, replace it with the most meaningless point",
|
|
||||||
"等": "etc.",
|
|
||||||
"是本地文件": "Is a local file",
|
|
||||||
"正在文本切分": "Text segmentation in progress",
|
|
||||||
"等价于修改容器内部的环境变量": "Equivalent to modifying the environment variables inside the container",
|
|
||||||
"cohere等请求源": "Cohere and other request sources",
|
|
||||||
"我们再把 remain_txt_to_cut_storage 中的部分文字取出": "Then we extract part of the text from remain_txt_to_cut_storage",
|
|
||||||
"生成带掩码tag的字符串": "Generate a string with masked tags",
|
|
||||||
"智谱 -=-=-=-=-=-=-": "ZhiPu -=-=-=-=-=-=-",
|
|
||||||
"前缀字符串": "Prefix string",
|
|
||||||
"Temperature值越大随机性越大": "The larger the Temperature value, the greater the randomness",
|
|
||||||
"借用PDF切割中的函数对文本进行切割": "Use functions from PDF cutting to segment the text",
|
|
||||||
"挑选一种剧情发展": "Choose a plot development",
|
|
||||||
"将换行符转换为": "Convert line breaks to",
|
|
||||||
"0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens": "0.1 means the model decoder only considers taking tokens from the top 10% probability candidates",
|
|
||||||
"确定故事的下一步": "Determine the next step of the story",
|
|
||||||
"个文件的显示": "Display of a file",
|
|
||||||
"用于控制输出tokens的多样性": "Used to control the diversity of output tokens",
|
|
||||||
"导入BaiduSpider": "Import BaiduSpider",
|
|
||||||
"不输入则为模型自行判断": "If not entered, the model will judge on its own",
|
|
||||||
"准备下一次迭代": "Prepare for the next iteration",
|
|
||||||
"包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器": "Contains functions and decorators for text processing and model fine-tuning",
|
|
||||||
"由于没有单独的参数保存包含图片的历史": "Since there is no separate parameter to save the history with images",
|
|
||||||
"section 开发": "section development",
|
|
||||||
"注意这里没有掩码tag": "Note that there is no mask tag here",
|
|
||||||
"section 设计": "section design",
|
|
||||||
"对话|编程|学术|智能体": "Dialogue | Programming | Academic | Intelligent Agent",
|
|
||||||
"您只需要选择其中一种即可": "You only need to choose one of them",
|
|
||||||
"添加Live2D形象": "Add Live2D image",
|
|
||||||
"请用以下命令安装": "Please install with the following command",
|
|
||||||
"触发了Google的安全访问策略": "Triggered Google's safe access policy",
|
|
||||||
"参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」": "Parameter example '1024x1024-hd-vivid' || Resolution support '1024x1024'",
|
|
||||||
"结局除外": "Excluding the ending",
|
|
||||||
"subgraph 函数调用": "subgraph function call",
|
|
||||||
"项目示意图": "Project diagram",
|
|
||||||
"实体关系图": "Entity relationship diagram",
|
|
||||||
"计算机把他的代号定为M102": "The computer named his code M102",
|
|
||||||
"首先尝试用双空行": "Try using double empty lines first",
|
|
||||||
"接下来将判断适合的图表类型": "Next, determine the appropriate chart type",
|
|
||||||
"注意前面的几句都小心保留了双引号": "Note that the previous sentences have carefully preserved double quotes",
|
|
||||||
"您正在调用插件": "You are calling a plugin",
|
|
||||||
"从上到下": "From top to bottom",
|
|
||||||
"请配置HUOSHAN_API_KEY": "Please configure HUOSHAN_API_KEY",
|
|
||||||
"知识检索内容相关度 Score": "Knowledge retrieval content relevance score",
|
|
||||||
"所以不会被处理": "So it will not be processed",
|
|
||||||
"设置10秒即可": "Set to 10 seconds",
|
|
||||||
"以空格分割": "Separated by space",
|
|
||||||
"根据位置和名称": "According to position and name",
|
|
||||||
"一些垃圾第三方接口出现这样的错误": "Some crappy third-party interfaces have this error",
|
|
||||||
"////////////////////// 输入清除键 ///////////////////////////": "////////////////////// Input Clear Key ///////////////////////////",
|
|
||||||
"并解析为html or md 文本": "And parse as HTML or MD text",
|
|
||||||
"匹配单段内容的连接上下文长度": "Matching single section content connection context length",
|
|
||||||
"控制输出的随机性": "Control the randomness of output",
|
|
||||||
"是模型名": "Is model name",
|
|
||||||
"请检查配置文件": "Please check the configuration file",
|
|
||||||
"如何使用one-api快速接入": "How to quickly access using one-api",
|
|
||||||
"请求失败": "Request failed",
|
|
||||||
"追加列表": "Append list",
|
|
||||||
"////////////////////// 函数插件区 ///////////////////////////": "////////////////////// Function Plugin Area ///////////////////////////",
|
|
||||||
"你是WPSAi": "You are WPSAi",
|
|
||||||
"第五部分 一些文件处理方法": "Part Five Some file processing methods",
|
|
||||||
"圆圆迷上了肥皂泡": "Yuan Yuan is fascinated by soap bubbles",
|
|
||||||
"可选参数": "Optional parameters",
|
|
||||||
"one-api模型": "one-api model",
|
|
||||||
"port/gpt_academic/ 下": "Under port/gpt_academic/",
|
|
||||||
"下一段故事": "Next part of the story",
|
|
||||||
"* 表示前一个字符可以出现0次或多次": "* means the previous character can appear 0 or more times",
|
|
||||||
"向后兼容配置": "Backward compatible configuration",
|
|
||||||
"输出部分": "Output section",
|
|
||||||
"稍后": "Later",
|
|
||||||
"比如比喻、拟人、排比、对偶、夸张等等": "For example, similes, personification, parallelism, antithesis, hyperbole, etc.",
|
|
||||||
"是自定义按钮": "Is a custom button",
|
|
||||||
"你需要根据用户给出的小说段落": "Based on the novel paragraph given by the user, you need to",
|
|
||||||
"以mermaid flowchart的形式展示": "Display in the form of a mermaid flowchart",
|
|
||||||
"最后一幕的字数少于1000字": "The last scene has fewer than 1000 words",
|
|
||||||
"如没出错则保持为空": "Keep it empty if there are no errors",
|
|
||||||
"建议您根据应用场景调整 top_p 或 temperature 参数": "It is recommended to adjust the top_p or temperature parameters according to the application scenario",
|
|
||||||
"仿佛他的出生就是要和这东西约会似的": "As if he had been born just for a rendezvous with this thing",
|
|
||||||
"处理特殊的渲染问题": "Handle special rendering issues",
|
|
||||||
"我认为最合理的故事结局是": "I think the most reasonable ending for the story is",
|
|
||||||
"请给出上方内容的思维导图": "Please provide a mind map of the content above",
|
|
||||||
"点other Formats": "Click on other Formats",
|
|
||||||
"文件加载完毕": "File loaded",
|
|
||||||
"Your account is not active. Cohere以账户失效为由": "Your account is not active. Cohere cites the account's inactivation as the reason",
|
|
||||||
"找不到任何.pdf文件": "Cannot find any .pdf files",
|
|
||||||
"请根据判断结果绘制相应的图表": "Please draw the corresponding chart based on the judgment result",
|
|
||||||
"积极地运用修辞手法": "Actively use rhetorical devices",
|
|
||||||
"工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-": "Utility function -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=",
|
|
||||||
"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=": "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Plugin Main Program 1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=",
|
|
||||||
"在": "In",
|
|
||||||
"即正则表达式库": "That is, the regular expression library",
|
|
||||||
"////////////////////// 基础功能区 ///////////////////////////": "////////////////////// Basic Function Area ///////////////////////////",
|
|
||||||
"并重新编译PDF | 输入参数为路径": "And recompile PDF | Input parameter is the path",
|
|
||||||
"甘特图": "Gantt Chart",
|
|
||||||
"但是需要注册账号": "But registration is required",
|
|
||||||
"获取完整的从Cohere返回的报错": "Get the complete error message returned from Cohere",
|
|
||||||
"合并摘要": "Merge Summary",
|
|
||||||
"这最后一课要提前讲了": "The last lesson will be taught ahead of schedule",
|
|
||||||
"大模型": "Large Model",
|
|
||||||
"查找输入区内容中的文件": "Find files in the input area content",
|
|
||||||
"预处理参数": "Preprocessing Parameters",
|
|
||||||
"这段代码定义了一个名为ProxyNetworkActivate的空上下文管理器": "This code defines an empty context manager named ProxyNetworkActivate",
|
|
||||||
"对话错误": "Dialogue Error",
|
|
||||||
"确定故事的结局": "Determine the ending of the story",
|
|
||||||
"第 1 部分": "Part 1",
|
|
||||||
"直到遇到括号外部最近的限定符": "Until the nearest qualifier outside the parentheses is encountered",
|
|
||||||
"负责向用户前端展示对话": "Responsible for displaying dialogue to the user frontend",
|
|
||||||
"查询内容": "Query Content",
|
|
||||||
"匹配结果更精准": "More accurate matching results",
|
|
||||||
"根据选择的图表类型绘制图表": "Draw a chart based on the selected chart type",
|
|
||||||
"空格、换行、空字符串都会报错": "Spaces, line breaks, and empty strings will all result in errors",
|
|
||||||
"请尝试削减单次输入的文本量": "Please try to reduce the amount of text in a single input",
|
|
||||||
"上传到路径": "Upload to path",
|
|
||||||
"中": "In",
|
|
||||||
"后缀字符串": "Suffix string",
|
|
||||||
"您还可以在接入one-api时": "When connecting to one-api, you can also",
|
|
||||||
"请说 “根据已知信息无法回答该问题” 或 “没有提供足够的相关信息”": "Please say 'Cannot answer the question based on available information' or 'Not enough relevant information is provided'",
|
|
||||||
"Cohere和API2D不会走这里": "Cohere and API2D will not go here",
|
|
||||||
"节点名字使用引号包裹": "Node names should be enclosed in quotes",
|
|
||||||
"这次的故事开头是": "The beginning of this story is",
|
|
||||||
"你是一个想象力丰富的杰出作家": "You are a brilliant writer with a rich imagination",
|
|
||||||
"正在与你的朋友互动": "Interacting with your friends",
|
|
||||||
"/「-hd」 || 风格支持 「-vivid」": "/ '-hd' || Style supports '-vivid'",
|
|
||||||
"如输入区无内容则直接解析历史记录": "If the input area is empty, parse the history directly",
|
|
||||||
"根据以上的情节": "Based on the above plot",
|
|
||||||
"将图表类型参数赋值为插件参数": "Set the chart type parameter to the plugin parameter",
|
|
||||||
"根据图片类型返回image/jpeg": "Return image/jpeg based on image type",
|
|
||||||
"如果lang_reference是英文": "If lang_reference is English",
|
|
||||||
"示意图": "Schematic diagram",
|
|
||||||
"完整参数列表": "Complete parameter list",
|
|
||||||
"仿佛灿烂的群星的背景被剪出一个方口": "As if the brilliant background of stars has been cut out into a square",
|
|
||||||
"如果没有找到合适的切分点": "If no suitable splitting point is found",
|
|
||||||
"获取数据": "Get data",
|
|
||||||
"内嵌的javascript代码": "Embedded JavaScript code",
|
|
||||||
"绘制多种mermaid图表": "Draw various mermaid charts",
|
|
||||||
"无效": "Invalid",
|
|
||||||
"查找pdf/md/word并获取文本内容并返回状态以及文本": "Search for pdf/md/word, retrieve text content, and return status and text",
|
|
||||||
"总结绘制脑图": "Summarize and draw a mind map",
|
|
||||||
"禁止杜撰不符合我选择的剧情": "Prohibit making up plots that do not match my choice",
|
|
||||||
"正在生成向量库": "Generating vector library",
|
|
||||||
"是LLM的内部调优参数": "Is an internal tuning parameter of LLM",
|
|
||||||
"请你选择一个合适的图表类型": "Please choose an appropriate chart type",
|
|
||||||
"请在“输入区”输入图像生成提示": "Please enter image generation prompts in the 'input area'",
|
|
||||||
"经测试设置为小于500时": "Testing shows that when this is set below 500",
|
|
||||||
"当然": "Certainly",
|
|
||||||
"必要": "Necessary",
|
|
||||||
"从左到右": "From left to right",
|
|
||||||
"接下来调用本地Latex翻译插件即可": "Next, call the local Latex translation plugin",
|
|
||||||
"如果相同则返回": "If the same, return",
|
|
||||||
"根据语言": "According to the language",
|
|
||||||
"使用mermaid语法": "Use mermaid syntax",
|
|
||||||
"这是游戏的第一步": "This is the first step of the game",
|
|
||||||
"构建后续剧情引导": "Building subsequent plot guidance",
|
|
||||||
"以满足 token 限制": "To meet the token limit",
|
|
||||||
"也就是说": "That is to say",
|
|
||||||
"mermaid语法举例": "Mermaid syntax example",
|
|
||||||
"发送": "Send",
|
|
||||||
"那么就只显示英文提示词": "Then only display English prompts",
|
|
||||||
"正在检查": "Checking",
|
|
||||||
"返回处理后的字符串": "Return the processed string",
|
|
||||||
"2 序列图": "2 Sequence diagram",
|
|
||||||
"yi-34b-chat-0205只有4k上下文": "yi-34b-chat-0205 has only 4k context",
|
|
||||||
"请检查配置": "Please check the configuration",
|
|
||||||
"请你给出围绕“{subject}”的象限图": "Please provide a quadrant diagram around '{subject}'",
|
|
||||||
"故事该结束了": "The story should end",
|
|
||||||
"修复缩进": "Fix indentation",
|
|
||||||
"请描述给出的图片": "Please describe the given image",
|
|
||||||
"启用插件热加载": "Enable plugin hot reload",
|
|
||||||
"通义-在线模型 -=-=-=-=-=-=-": "Tongyi - Online Models -=-=-=-=-=-=-",
|
|
||||||
"比较页数是否相同": "Compare if the number of pages is the same",
|
|
||||||
"正式开始服务": "Officially start the service",
|
|
||||||
"使用mermaid flowchart对以上文本进行总结": "Summarize the above text using a mermaid flowchart",
|
|
||||||
"不是vision 才处理history": "Only process history if it is not a vision model",
|
|
||||||
"来定义了一个正则表达式模式": "Defined a regular expression pattern",
|
|
||||||
"IP地址等": "IP addresses, etc.",
|
|
||||||
"那么双引号才是句子的终点": "Then the double quotes mark the end of the sentence",
|
|
||||||
"输入1": "Input 1",
|
|
||||||
"/「1792x1024」/「1024x1792」 || 质量支持 「-standard」": "/'1792x1024'/ '1024x1792' || Quality support '-standard'",
|
|
||||||
"为了避免索引错误将其更改为大写": "To avoid indexing errors, change it to uppercase",
|
|
||||||
"搜索网页": "Search the web",
|
|
||||||
"用于控制生成文本的随机性和创造性": "Used to control the randomness and creativity of generated text",
|
|
||||||
"不能等于 0": "Cannot equal 0",
|
|
||||||
"在距地球五万光年的远方": "At a distance of fifty thousand light-years from Earth",
|
|
||||||
". 表示任意单一字符": ". represents any single character",
|
|
||||||
"选择预测值最大的k个token进行采样": "Select the k tokens with the largest predicted values for sampling",
|
|
||||||
"输出2": "Output 2",
|
|
||||||
"函数示意图": "Function Diagram",
|
|
||||||
"You are associated with a deactivated account. Cohere以账户失效为由": "You are associated with a deactivated account. Cohere due to account deactivation",
|
|
||||||
"3. 后续剧情发展3": "3. Subsequent Plot Development 3",
|
|
||||||
"并以“剧情收尾”四个字提示程序": "And use the four characters 'Plot Conclusion' as a prompt for the program",
|
|
||||||
"中文省略号": "Chinese Ellipsis",
|
|
||||||
"则不生效": "Will not take effect",
|
|
||||||
"目前是两位小数": "Currently two decimal places",
|
|
||||||
"Incorrect API key. Cohere以提供了不正确的API_KEY为由": "Incorrect API key. Cohere reports an incorrect API_KEY."
|
|
||||||
}
|
}
|
||||||
@@ -44,7 +44,7 @@
|
|||||||
"批量总结PDF文档": "BatchSummarizePDFDocuments",
|
"批量总结PDF文档": "BatchSummarizePDFDocuments",
|
||||||
"批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsUsingPDFMiner",
|
"批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsUsingPDFMiner",
|
||||||
"批量翻译PDF文档": "BatchTranslatePDFDocuments",
|
"批量翻译PDF文档": "BatchTranslatePDFDocuments",
|
||||||
"PDF批量翻译": "BatchTranslatePDFDocumentsUsingMultiThreading",
|
"批量翻译PDF文档_多线程": "BatchTranslatePDFDocumentsUsingMultiThreading",
|
||||||
"谷歌检索小助手": "GoogleSearchAssistant",
|
"谷歌检索小助手": "GoogleSearchAssistant",
|
||||||
"理解PDF文档内容标准文件输入": "StandardFileInputForUnderstandingPDFDocumentContent",
|
"理解PDF文档内容标准文件输入": "StandardFileInputForUnderstandingPDFDocumentContent",
|
||||||
"理解PDF文档内容": "UnderstandingPDFDocumentContent",
|
"理解PDF文档内容": "UnderstandingPDFDocumentContent",
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
"Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
|
"Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
|
||||||
"下载arxiv论文并翻译摘要": "DownloadArxivPaperAndTranslateAbstract",
|
"下载arxiv论文并翻译摘要": "DownloadArxivPaperAndTranslateAbstract",
|
||||||
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
|
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
|
||||||
"PDF批量翻译": "BatchTranslatePDFDocuments_MultiThreaded",
|
"批量翻译PDF文档_多线程": "BatchTranslatePDFDocuments_MultiThreaded",
|
||||||
"下载arxiv论文翻译摘要": "DownloadArxivPaperTranslateAbstract",
|
"下载arxiv论文翻译摘要": "DownloadArxivPaperTranslateAbstract",
|
||||||
"解析一个Python项目": "ParsePythonProject",
|
"解析一个Python项目": "ParsePythonProject",
|
||||||
"解析一个Golang项目": "ParseGolangProject",
|
"解析一个Golang项目": "ParseGolangProject",
|
||||||
|
|||||||
@@ -43,7 +43,7 @@
|
|||||||
"批量总结PDF文档": "BatchSummarizePDFDocuments",
|
"批量总结PDF文档": "BatchSummarizePDFDocuments",
|
||||||
"批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsPdfminer",
|
"批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsPdfminer",
|
||||||
"批量翻译PDF文档": "BatchTranslatePDFDocuments",
|
"批量翻译PDF文档": "BatchTranslatePDFDocuments",
|
||||||
"PDF批量翻译": "BatchTranslatePdfDocumentsMultithreaded",
|
"批量翻译PDF文档_多线程": "BatchTranslatePdfDocumentsMultithreaded",
|
||||||
"谷歌检索小助手": "GoogleSearchAssistant",
|
"谷歌检索小助手": "GoogleSearchAssistant",
|
||||||
"理解PDF文档内容标准文件输入": "StandardFileInputForUnderstandingPdfDocumentContent",
|
"理解PDF文档内容标准文件输入": "StandardFileInputForUnderstandingPdfDocumentContent",
|
||||||
"理解PDF文档内容": "UnderstandingPdfDocumentContent",
|
"理解PDF文档内容": "UnderstandingPdfDocumentContent",
|
||||||
|
|||||||
@@ -1,58 +0,0 @@
|
|||||||
# Using TTS text-to-speech


## 1. Using EDGE-TTS (simple)

Simply modify this project's configuration as follows:

```
TTS_TYPE = "EDGE_TTS"
EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"
```

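To confirm that the configured `EDGE_TTS_VOICE` actually produces audio, a quick standalone check can help. The following is a minimal sketch, not part of this project, assuming the third-party `edge-tts` package is installed (`pip install edge-tts`); the project may use a different client internally.

```python
# Minimal sketch: synthesize a short sample with the voice configured above.
# Assumes the third-party `edge-tts` package (pip install edge-tts).
import asyncio

import edge_tts

VOICE = "zh-CN-XiaoxiaoNeural"        # must match EDGE_TTS_VOICE above
TEXT = "你好,这是一段语音合成测试。"  # any short sample sentence

async def main() -> None:
    communicate = edge_tts.Communicate(TEXT, VOICE)
    await communicate.save("edge_tts_sample.mp3")  # play this file to verify the voice

asyncio.run(main())
```

If the generated mp3 plays back correctly, the EDGE_TTS configuration above can be used as-is.
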
## 2. Using SoVITS (requires a GPU)

Use the following docker-compose.yml file to start the SoVITS service API first.

1. Create the following folder structure
```shell
.
├── docker-compose.yml
└── reference
    ├── clone_target_txt.txt
    └── clone_target_wave.mp3
```
2. Where `docker-compose.yml` is
```yaml
version: '3.8'
services:
  gpt-sovits:
    image: fuqingxu/sovits_gptac_trim:latest
    container_name: sovits_gptac_container
    working_dir: /workspace/gpt_sovits_demo
    environment:
      - is_half=False
      - is_share=False
    volumes:
      - ./reference:/reference
    ports:
      - "19880:9880" # 19880 is the exposed port of the sovits api; remember it
    shm_size: 16G
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: "all"
              capabilities: [gpu]
    command: bash -c "python3 api.py"
```
3. Where `clone_target_wave.mp3` is the audio of the voice to be cloned, and `clone_target_txt.txt` is the transcript of that audio ( https://wiki.biligame.com/ys/%E8%A7%92%E8%89%B2%E8%AF%AD%E9%9F%B3 )
4. Run `docker-compose up` (you can verify that the API is reachable with the sketch at the end of this section)
5. Modify this project's configuration as follows
   (19880 is the exposed port of the sovits api and must match the port in docker-compose.yml)
```
TTS_TYPE = "LOCAL_SOVITS_API"
GPT_SOVITS_URL = "http://127.0.0.1:19880"
```
6. Launch this project
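Before step 6, it can be worth confirming that the SoVITS container is actually listening on the mapped port. The following is a minimal sketch, not part of this project, that only checks reachability of `GPT_SOVITS_URL` using the `requests` package; it makes no assumption about the synthesis endpoint or request schema exposed by the bundled api.py.

```python
# Minimal reachability check for the SoVITS API container (assumes `requests` is installed).
# It does not perform synthesis; the project calls the API itself once GPT_SOVITS_URL is set.
import requests

GPT_SOVITS_URL = "http://127.0.0.1:19880"  # must match the port mapping in docker-compose.yml

try:
    resp = requests.get(GPT_SOVITS_URL, timeout=5)
    print(f"SoVITS API reachable, HTTP status {resp.status_code}")
except requests.exceptions.RequestException as err:
    print(f"SoVITS API not reachable: {err} -- is the container running?")
```
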
@@ -1,46 +0,0 @@
|
|||||||
# Using vLLM


## 1. First, start vLLM and pick a model yourself

```
python -m vllm.entrypoints.openai.api_server --model /home/hmp/llm/cache/Qwen1___5-32B-Chat --tensor-parallel-size 2 --dtype=half
```

This uses a local model stored at `/home/hmp/llm/cache/Qwen1___5-32B-Chat`; change it according to your own needs.

## 2. Test vLLM

```
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "/home/hmp/llm/cache/Qwen1___5-32B-Chat",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "怎么实现一个去中心化的控制器?"}
        ]
    }'
```

## 3. Configure this project

```
API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"
LLM_MODEL = "vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=4096)"
API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "http://localhost:8000/v1/chat/completions"}
```

```
"vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=4096)"
where
"vllm-" is the prefix (required)
"/home/hmp/llm/cache/Qwen1___5-32B-Chat" is the model name (required)
"(max_token=6666)" is extra configuration (optional)
```

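Because `API_URL_REDIRECT` simply points the OpenAI-compatible endpoint at the local vLLM server, the setup can also be exercised with any OpenAI-style client before launching the project. The following is a minimal sketch, not part of this project, assuming the official `openai` Python package; the key is the same placeholder as above, which vLLM typically does not verify unless it was started with an API key.

```python
# Minimal check of the OpenAI-compatible vLLM endpoint that API_URL_REDIRECT points to.
# Assumes the `openai` Python package (pip install openai).
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8000/v1",  # same server as in API_URL_REDIRECT
    api_key="sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789",  # placeholder key
)

resp = client.chat.completions.create(
    model="/home/hmp/llm/cache/Qwen1___5-32B-Chat",  # the model path served by vLLM
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello in one sentence."},
    ],
)
print(resp.choices[0].message.content)
```

If this prints a reply, the redirect target is working and the project should be able to reach it after launch.
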
## 4. Launch!

```
python main.py
```

30
docs/waifu_plugin/autoload.js
Normal file
30
docs/waifu_plugin/autoload.js
Normal file
@@ -0,0 +1,30 @@
try {
    $("<link>").attr({href: "file=docs/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css"}).appendTo('head');
    $('body').append('<div class="waifu"><div class="waifu-tips"></div><canvas id="live2d" class="live2d"></canvas><div class="waifu-tool"><span class="fui-home"></span> <span class="fui-chat"></span> <span class="fui-eye"></span> <span class="fui-user"></span> <span class="fui-photo"></span> <span class="fui-info-circle"></span> <span class="fui-cross"></span></div></div>');
    $.ajax({url: "file=docs/waifu_plugin/waifu-tips.js", dataType:"script", cache: true, success: function() {
        $.ajax({url: "file=docs/waifu_plugin/live2d.js", dataType:"script", cache: true, success: function() {
            /* These parameters can be modified directly */
            live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // hitokoto ("one sentence") API
            live2d_settings['modelId'] = 5; // default model ID
            live2d_settings['modelTexturesId'] = 1; // default texture ID
            live2d_settings['modelStorage'] = false; // do not persist the model ID
            live2d_settings['waifuSize'] = '210x187';
            live2d_settings['waifuTipsSize'] = '187x52';
            live2d_settings['canSwitchModel'] = true;
            live2d_settings['canSwitchTextures'] = true;
            live2d_settings['canSwitchHitokoto'] = false;
            live2d_settings['canTakeScreenshot'] = false;
            live2d_settings['canTurnToHomePage'] = false;
            live2d_settings['canTurnToAboutPage'] = false;
            live2d_settings['showHitokoto'] = false; // show hitokoto quotes
            live2d_settings['showF12Status'] = false; // show loading status
            live2d_settings['showF12Message'] = false; // show waifu messages
            live2d_settings['showF12OpenMsg'] = false; // show a hint when the console is opened
            live2d_settings['showCopyMessage'] = false; // show a hint when content is copied
            live2d_settings['showWelcomeMessage'] = true; // show a welcome message on page entry

            /* add before initModel */
            initModel("file=docs/waifu_plugin/waifu-tips.json");
        }});
    }});
} catch(err) { console.log("[Error] JQuery is not defined.") }
Binary file not shown.
@@ -0,0 +1,126 @@
|
|||||||
|
<?xml version="1.0" standalone="no"?>
|
||||||
|
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<metadata>
|
||||||
|
<json>
|
||||||
|
{
|
||||||
|
"fontFamily": "flat-ui-icons",
|
||||||
|
"majorVersion": 1,
|
||||||
|
"minorVersion": 1,
|
||||||
|
"fontURL": "http://designmodo.com/flat",
|
||||||
|
"designer": "Sergey Shmidt",
|
||||||
|
"designerURL": "http://designmodo.com",
|
||||||
|
"license": "Attribution-NonCommercial-NoDerivs 3.0 Unported",
|
||||||
|
"licenseURL": "http://creativecommons.org/licenses/by-nc-nd/3.0/",
|
||||||
|
"version": "Version 1.1",
|
||||||
|
"fontId": "flat-ui-icons",
|
||||||
|
"psName": "flat-ui-icons",
|
||||||
|
"subFamily": "Regular",
|
||||||
|
"fullName": "flat-ui-icons",
|
||||||
|
"description": "Generated by IcoMoon"
|
||||||
|
}
|
||||||
|
</json>
|
||||||
|
</metadata>
|
||||||
|
<defs>
|
||||||
|
<font id="flat-ui-icons" horiz-adv-x="1024">
|
||||||
|
<font-face units-per-em="1024" ascent="960" descent="-64" />
|
||||||
|
<missing-glyph horiz-adv-x="1024" />
|
||||||
|
<glyph unicode=" " d="" horiz-adv-x="512" />
|
||||||
|
<glyph unicode="" d="M896 192l-384 512-384-512h768z" />
|
||||||
|
<glyph unicode="" d="M128 704l384-512 384 512h-768z" />
|
||||||
|
<glyph unicode="" d="M896 256h-768l384 384 384-384z" />
|
||||||
|
<glyph unicode="" d="M512 256l-384 384h768l-384-384z" />
|
||||||
|
<glyph unicode="" d="M896 0l-768 448 768 448v-896z" />
|
||||||
|
<glyph unicode="" d="M128 896l768-448-768-448v896z" />
|
||||||
|
<glyph unicode="" d="M224.96 448.768l447.168 447.232 128-131.008-321.152-318.016 321.152-320.896-128.256-128.256-446.912 450.944z" />
|
||||||
|
<glyph unicode="" d="M353.152-2.112l-128.192 128.256 321.088 320.896-321.152 317.952 128 131.008 447.168-447.232-446.912-450.88z" />
|
||||||
|
<glyph unicode="" d="M928 351.936h-320v-319.936c0-35.392-28.608-64-64-64h-64c-35.328 0-64 28.608-64 64v319.936h-320c-35.328 0-64 28.736-64 64.064v64.064c0 35.328 28.672 63.872 64 63.872h320v320.064c0 35.328 28.672 64 64 64h64c35.392 0 64-28.672 64-64v-320.064h320c35.392 0 64-28.544 64-63.872v-64.064c0-35.328-28.608-64.064-64-64.064z" />
|
||||||
|
<glyph unicode="" d="M919.808 764.032c12.48-12.416 12.48-32.832 0-45.248l-248.896-249.024c-12.352-12.416-12.352-32.832 0-45.312l248.768-249.088c12.48-12.416 12.48-32.832 0-45.248l-90.624-90.432c-12.352-12.416-32.768-12.416-45.248 0l-248.64 249.088c-12.416 12.416-32.832 12.416-45.248 0l-248.896-248.896c-12.416-12.48-32.832-12.48-45.248 0l-90.496 90.624c-12.416 12.352-12.416 32.768 0 45.248l248.96 248.896c12.416 12.416 12.416 32.832 0 45.312l-248.768 249.024c-12.416 12.48-12.416 32.832 0 45.248l90.56 90.496c12.416 12.416 32.832 12.416 45.248 0l248.64-249.024c12.416-12.48 32.832-12.48 45.248-0.064l248.832 248.96c12.48 12.352 32.896 12.352 45.248 0l90.56-90.56z" />
|
||||||
|
<glyph unicode="" d="M923.136 822.592c-12.352 12.544-32.768 12.544-45.12 0l-476.16-474.496c-12.48-12.544-32.832-12.544-45.248 0l-208.64 212.736c-6.144 6.208-14.272 9.408-22.336 9.472-8.256 0-16.576-3.008-22.848-9.472l-92.16-83.008c-6.144-6.272-9.472-14.144-9.472-22.336 0-8.32 3.328-17.024 9.472-23.232l210.368-220.992c12.416-12.48 32.832-33.024 45.248-45.632l90.432-91.264c12.416-12.48 32.768-12.48 45.248 0l611.712 611.328c12.48 12.48 12.48 33.088 0 45.632l-90.496 91.264z" />
|
||||||
|
<glyph unicode="" d="M512 960c-281.6 0-512-230.4-512-512s230.4-512 512-512 512 230.4 512 512c0 281.6-230.4 512-512 512zM512 140.8c-168.96 0-307.2 138.24-307.2 307.2s138.24 307.2 307.2 307.2c168.96 0 307.2-138.24 307.2-307.2 0-168.96-138.24-307.2-307.2-307.2z" />
|
||||||
|
<glyph unicode="" d="M512 960c-281.6 0-512-230.4-512-512s230.4-512 512-512 512 230.4 512 512c0 281.6-230.4 512-512 512zM512 140.8c-168.96 0-307.2 138.24-307.2 307.2s138.24 307.2 307.2 307.2c168.96 0 307.2-138.24 307.2-307.2 0-168.96-138.24-307.2-307.2-307.2zM512 601.6c-87.040 0-153.6-66.56-153.6-153.6s66.56-153.6 153.6-153.6 153.6 66.56 153.6 153.6c0 87.040-66.56 153.6-153.6 153.6z" />
|
||||||
|
<glyph unicode="" d="M256 960h512c143.36 0 256-112.64 256-256v-512c0-143.36-112.64-256-256-256h-512c-143.36 0-256 112.64-256 256v512c0 143.36 112.64 256 256 256z" />
|
||||||
|
<glyph unicode="" d="M768 960h-512c-143.36 0-256-112.64-256-256v-512c0-143.36 112.64-256 256-256h512c143.36 0 256 112.64 256 256v512c0 143.36-112.64 256-256 256zM844.8 550.4l-368.64-368.64c-5.12-5.12-20.48-5.12-25.6 0l-56.32 56.32c-5.12 5.12-20.48 20.48-25.6 25.6l-128 133.12c-5.12 5.12-5.12 10.24-5.12 15.36s0 10.24 5.12 15.36l56.32 51.2c5.12 0 10.24 5.12 10.24 5.12 5.12 0 10.24 0 15.36-5.12l122.88-128c5.12-5.12 20.48-5.12 25.6 0l286.72 286.72c5.12 5.12 20.48 5.12 25.6 0l56.32-56.32c10.24-10.24 10.24-20.48 5.12-30.72z" />
|
||||||
|
<glyph unicode="" d="M512 960c-282.752 0-512-229.248-512-512 0-282.688 229.248-512 512-512 282.816 0 512 229.248 512 512 0 282.752-229.184 512-512 512zM576.768 195.136c0-37.056-28.992-67.072-64.768-67.072s-64.768 30.016-64.768 67.072v313.088c0 37.056 28.992 67.072 64.768 67.072s64.768-30.016 64.768-67.072v-313.088zM512 640.32c-35.776 0-64.768 28.608-64.768 63.872s28.992 63.744 64.768 63.744 64.768-28.544 64.768-63.808-28.992-63.808-64.768-63.808z" />
|
||||||
|
<glyph unicode="" d="M512 960c-282.752 0-512-229.248-512-512s229.248-512 512-512c282.752 0 512 229.248 512 512 0 282.752-229.248 512-512 512zM512 128.064c-35.776 0-64.768 28.544-64.768 63.808 0 35.2 28.992 63.808 64.768 63.808 35.776 0 64.768-28.608 64.768-63.808 0-35.264-28.992-63.808-64.768-63.808zM576.768 387.776c0-37.056-28.992-67.072-64.768-67.072-35.776 0-64.768 30.080-64.768 67.072v313.088c0 37.056 28.992 67.072 64.768 67.072 35.776 0 64.768-30.080 64.768-67.072v-313.088z" />
|
||||||
|
<glyph unicode="" d="M512-64c-282.752 0-512 229.248-512 512 0 282.688 229.248 512 512 512 282.752 0 512-229.248 512-512 0-282.752-229.248-512-512-512zM512 128.064c35.776 0 64.768 28.544 64.768 63.808 0 35.2-28.992 63.808-64.768 63.808-35.776 0-64.768-28.608-64.768-63.808 0-35.264 28.992-63.808 64.768-63.808zM650.752 724.288c-33.92 27.904-82.24 43.456-140.032 43.456-42.56 0-78.912-7.68-110.144-20.16-16.576-6.72-69.632-39.68-80.64-48.896l32.384-48.32c5.312-9.344 13.952-14.080 25.92-14.080 4.992 0 10.624 1.984 16.96 5.888 4.608 2.88 41.088 21.696 56.512 26.368 32.32 9.6 67.84 5.696 84.16 0.64 22.272-6.848 38.4-19.904 47.36-37.76 5.888-11.776 13.376-44.16-4.224-74.432-14.656-25.088-37.568-44.16-62.848-61.056-13.504-9.216-26.048-18.624-37.376-28.416-0.512 0-1.792-0.96-4.672-3.52 1.408 1.216 3.264 2.304 4.672 3.52 3.2 0.128-30.784-43.328-30.784-83.52 0-42.88 0-64 0-64h128v64c0 33.28 16.128 51.968 16.448 56.704 11.008 7.872 61.056 46.144 72.96 59.904 22.208 25.6 38.592 59.392 38.592 107.008 0 48.832-19.392 88.832-53.248 116.672z" />
|
||||||
|
<glyph unicode="" d="M512 960c-282.752 0-512-229.184-512-511.936 0-282.816 229.248-512.064 512-512.064 282.752 0 512 229.248 512 512.064 0 282.752-229.248 511.936-512 511.936zM842.88 552.128l-367.296-367.232c-7.488-7.488-19.712-7.488-27.136 0l-54.272 54.784c-7.424 7.552-19.712 19.904-27.136 27.392l-126.336 132.8c-3.712 3.712-5.696 8.96-5.696 13.888 0 4.992 1.984 9.728 5.696 13.504l55.36 49.92c3.776 3.84 8.768 5.632 13.696 5.632 4.864-0.064 9.728-1.984 13.44-5.632l125.248-127.872c7.488-7.616 19.648-7.616 27.136 0l285.888 285.12c7.424 7.488 19.712 7.488 27.136 0l54.336-54.912c7.424-7.488 7.424-19.84-0.064-27.392z" />
|
||||||
|
<glyph unicode="" d="M874.048 810.048c-199.936 200-524.096 199.936-724.096 0-199.936-199.872-199.936-524.096 0.064-724.032 199.936-199.936 524.096-199.936 724.032-0.064 200 199.936 200 524.16 0 724.096zM747.2 309.056c27.52-27.52 28.224-71.296 1.728-97.856-26.56-26.56-70.4-25.728-97.792 1.728l-139.072 139.008-139.584-139.584c-27.52-27.456-71.296-28.224-97.792-1.728-26.56 26.56-25.728 70.4 1.664 97.856l139.648 139.584-139.648 139.648c-27.456 27.392-28.224 71.168-1.664 97.728 26.496 26.56 70.336 25.792 97.792-1.664l139.584-139.584 139.072 139.072c27.456 27.456 71.232 28.224 97.792 1.664 26.496-26.56 25.728-70.336-1.728-97.792l-139.008-139.072 139.008-139.008z" />
|
||||||
|
<glyph unicode="" d="M512 960.064c-282.752 0-512-229.312-512-512.064 0-282.816 229.248-512.064 512-512.064s512 229.248 512 512.064c0 282.752-229.248 512.064-512 512.064zM764.224 383.296h-187.392v-187.52c0-36.992-28.992-67.072-64.768-67.072s-64.768 30.080-64.768 67.072v187.52h-188.16c-36.992 0-67.072 28.928-67.072 64.704s30.080 64.768 67.072 64.768h188.16v188.16c0 37.056 28.992 67.072 64.768 67.072s64.768-30.016 64.768-67.072v-188.16h187.456c37.056 0 67.072-29.056 67.072-64.768s-30.016-64.704-67.136-64.704z" />
|
||||||
|
<glyph unicode="" d="M288 960h-192c-35.328 0-64-28.608-64-64v-896c0-35.392 28.672-64 64-64h192c35.328 0 64 28.608 64 64v896c0 35.392-28.672 64-64 64zM928 960h-192c-35.392 0-64-28.608-64-64v-896c0-35.392 28.608-64 64-64h192c35.392 0 64 28.608 64 64v896c0 35.392-28.608 64-64 64z" />
|
||||||
|
<glyph unicode="" d="M880 475.776l-832 480c-9.856 5.696-22.144 5.696-32 0-9.856-5.76-16-16.32-16-27.776v-960c0-11.456 6.144-22.016 16-27.712 4.928-2.88 10.496-4.288 16-4.288s11.072 1.408 16 4.288l832 480c9.856 5.696 16 16.256 16 27.712s-6.144 22.016-16 27.776z" />
|
||||||
|
<glyph unicode="" d="M493.184 896c-48.384 0-63.040-27.84-63.040-27.84s-183.104-216.192-266.56-216.192c-82.176 0-81.344 0-81.344 0-45.44 0-82.24-36.416-82.24-81.28v-244.096c0-44.928 36.8-81.28 82.176-81.28 0 0 1.344 0 82.176 0 81.024 0 269.568-218.88 269.568-218.88 14.912-15.488 35.904-25.152 59.264-25.152 45.376 0 82.176 36.352 82.176 81.28v732.096c0 44.928-36.8 81.344-82.176 81.344zM843.968 817.728l-47.424-70.976c86.656-70.4 142.208-177.728 142.208-298.176s-55.488-227.84-142.208-298.112l47.424-70.976c109.44 85.888 180.032 219.136 180.032 369.088 0 150.016-70.592 283.2-180.032 369.152zM748.8 675.328l-47.872-71.68c41.344-38.912 67.392-93.76 67.392-155.072s-26.048-116.096-67.392-155.072l47.872-71.616c63.872 54.72 104.576 136 104.576 226.688 0 90.816-40.704 171.968-104.576 226.752z" />
|
||||||
|
<glyph unicode="" d="M492.8 896c-51.2 0-64-25.6-64-25.6s-179.2-217.6-262.4-217.6c-83.2 0-83.2 0-83.2 0-44.8 0-83.2-38.4-83.2-83.2v-243.2c0-44.8 38.4-83.2 83.2-83.2 0 0 0 0 83.2 0 83.2 0 268.8-217.6 268.8-217.6 12.8-12.8 32-25.6 57.6-25.6 44.8 0 83.2 38.4 83.2 83.2v729.6c0 44.8-38.4 83.2-83.2 83.2z" />
|
||||||
|
<glyph unicode="" d="M832 640l-213.056-208.448-125.696 125.696 210.752 210.688-160 160.064h448v-448l-160 160zM526.976 342.528l-206.976-202.496 167.488-172.032h-455.488v452.288l160-164.288 210.752 210.752 124.224-124.224z" />
|
||||||
|
<glyph unicode="" d="M991.936 863.36h-959.872c-17.6 0-32-15.36-32-34.176v-124.672c0-18.048 14.4-32.832 32-32.832h959.872c17.6 0 32 14.72 32 32.832v124.672c0 18.816-14.4 34.176-32 34.176zM991.936 543.36h-959.872c-17.6 0-32-15.36-32-34.24v-124.608c0-18.112 14.4-32.832 32-32.832h959.872c17.6 0 32 14.72 32 32.832v124.672c0 18.816-14.4 34.176-32 34.176zM991.936 223.36h-959.872c-17.6 0-32-15.36-32-34.24v-124.608c0-17.984 14.4-32.768 32-32.768h959.872c17.6 0 32 14.72 32 32.768v124.608c0 18.88-14.4 34.24-32 34.24z" />
|
||||||
|
<glyph unicode="" d="M352 896h-320c-19.2 0-32-12.8-32-32v-320c0-19.2 12.8-32 32-32h320c19.2 0 32 12.8 32 32v320c0 19.2-12.8 32-32 32zM352 384h-320c-19.2 0-32-12.8-32-32v-320c0-19.2 12.8-32 32-32h320c19.2 0 32 12.8 32 32v320c0 19.2-12.8 32-32 32zM992 896h-448c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h448c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 640h-448c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h448c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 384h-448c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h448c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 128h-448c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h448c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32z" />
|
||||||
|
<glyph unicode="" d="M288 896h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM288 576h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM608 896h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM608 576h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM928 896h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM928 576h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM288 256h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM608 256h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32zM928 256h-192c-19.2 0-32-12.8-32-32v-192c0-19.2 12.8-32 32-32h192c19.2 0 32 12.8 32 32v192c0 19.2-12.8 32-32 32z" />
|
||||||
|
<glyph unicode="" d="M416 960h-384c-19.2 0-32-12.8-32-32v-384c0-19.2 12.8-32 32-32h384c19.2 0 32 12.8 32 32v384c0 19.2-12.8 32-32 32zM992 960h-384c-19.2 0-32-12.8-32-32v-384c0-19.2 12.8-32 32-32h384c19.2 0 32 12.8 32 32v384c0 19.2-12.8 32-32 32zM416 384h-384c-19.2 0-32-12.8-32-32v-384c0-19.2 12.8-32 32-32h384c19.2 0 32 12.8 32 32v384c0 19.2-12.8 32-32 32zM992 384h-384c-19.2 0-32-12.8-32-32v-384c0-19.2 12.8-32 32-32h384c19.2 0 32 12.8 32 32v384c0 19.2-12.8 32-32 32z" />
|
||||||
|
<glyph unicode="" d="M992 896h-768c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h768c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 640h-768c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h768c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 384h-768c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h768c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 128h-768c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h768c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM96 896h-64c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h64c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM96 640h-64c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h64c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM96 384h-64c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h64c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM96 128h-64c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h64c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32z" />
|
||||||
|
<glyph unicode="" d="M992 896h-960c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h960c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 640h-960c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h960c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 384h-960c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h960c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 128h-960c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h960c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32z" />
|
||||||
|
<glyph unicode="" d="M992 832h-640c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h640c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 512h-640c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h640c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM992 192h-640c-19.2 0-32-12.8-32-32v-64c0-19.2 12.8-32 32-32h640c19.2 0 32 12.8 32 32v64c0 19.2-12.8 32-32 32zM256 768c0-70.692-57.308-128-128-128-70.692 0-128 57.308-128 128 0 70.692 57.308 128 128 128 70.692 0 128-57.308 128-128zM256 448c0-70.692-57.308-128-128-128-70.692 0-128 57.308-128 128 0 70.692 57.308 128 128 128 70.692 0 128-57.308 128-128zM256 128c0-70.692-57.308-128-128-128-70.692 0-128 57.308-128 128 0 70.692 57.308 128 128 128 70.692 0 128-57.308 128-128z" />
|
||||||
|
<glyph unicode="" d="M896 960h-768c-70.656 0-128-57.344-128-128v-768c0-70.656 57.344-128 128-128h768c70.656 0 128 57.344 128 128v768c0 70.656-57.344 128-128 128zM384 895.936c35.328 0 64-28.608 64-63.936 0-35.392-28.672-64-64-64s-64 28.608-64 64c0 35.328 28.672 63.936 64 63.936zM192 895.936c35.328 0 64-28.608 64-63.936 0-35.392-28.672-64-64-64s-64 28.608-64 64c0 35.328 28.672 63.936 64 63.936zM896.064 64h-768.064v640h768.064v-640z" />
|
||||||
|
<glyph unicode="" d="M938.752 767.744h-106.688v106.624c0 47.104-38.208 85.312-85.312 85.312h-661.44c-47.104 0-85.312-38.208-85.312-85.312v-660.672c0-47.168 37.248-85.376 83.136-85.376h108.864v-106.688c0-47.104 37.248-85.312 83.136-85.312h665.792c45.952 0 83.2 38.208 83.2 85.312v660.736c-0.064 47.104-38.272 85.376-85.376 85.376zM384 895.616c35.328 0 64-28.608 64-63.936 0-35.392-28.672-64-64-64s-64 28.608-64 64c0 35.328 28.672 63.936 64 63.936zM192 895.616c35.328 0 64-28.608 64-63.936 0-35.392-28.672-64-64-64s-64 28.608-64 64c0 35.328 28.672 63.936 64 63.936zM128 255.68l-0.064 448h576.064v-448h-576zM896 63.68h-576v64.64h428.864c45.952 0 83.2 38.208 83.2 85.376v297.984h63.936v-448z" />
|
||||||
|
<glyph unicode="" d="M768 191.936c-121.6 0-197.888 68.736-256 144.448-58.112-75.712-134.4-144.448-256-144.448-102.848 0-256 68.224-256 256.064 0 187.776 153.152 256 256 256 121.6 0 197.888-68.672 256-144.448 58.112 75.776 134.4 144.448 256 144.448 102.912 0 256-68.224 256-256 0-187.84-153.088-256.064-256-256.064zM256 576c-29.632-0.512-128-11.136-128-128 0-121.856 106.624-128 128-128 78.272 0 123.264 47.808 178.752 128-55.488 80.128-100.48 128-178.752 128zM589.248 448c55.424-80.128 100.352-127.872 178.432-128 30.336 0.448 128.32 11.264 128.32 128 0 121.856-106.624 128-128 128-78.272 0-123.264-47.872-178.752-128z" />
|
||||||
|
<glyph unicode="" d="M800 512c-22.976 0-59.328 0-96 0v-128c22.656 0 44.8 0 64 0 12.096 0 23.296 0 32 0 123.712 0 224-100.288 224-224s-100.288-224-224-224-224 100.224-224 224c0 22.976 0 59.264 0 96h-128c0-22.656 0-44.864 0-64 0-12.096 0-23.232 0-32 0-123.776-100.288-224-224-224s-224 100.224-224 224 100.288 224 224 224c22.976 0 59.328 0 96 0v128c-22.592 0-44.864 0-64 0-12.096 0-23.232 0-32 0-123.712 0-224 100.224-224 224 0 123.712 100.288 224 224 224s224-100.288 224-224c0-22.976 0-59.328 0-96h128c0 22.592 0 44.864 0 64 0 12.096 0 23.232 0 32 0 123.712 100.288 224 224 224s224-100.288 224-224c0-123.776-100.288-224-224-224zM320 736c0 52.992-43.008 96-96 96s-96-43.008-96-96c0-53.056 43.008-96 96-96 7.744 0 19.52 0 32 0 29.568 0 64 0 64 0s0 69.056 0 96zM320 192c0 29.504 0 64 0 64s-69.056 0-96 0c-52.992 0-96-43.008-96-96s43.008-96 96-96 96 43.008 96 96c0 7.744 0 19.52 0 32zM704 160c0-52.992 43.008-96 96-96s96 43.008 96 96-43.008 96-96 96c-7.744 0-19.52 0-32 0-29.568 0-64 0-64 0s0-69.12 0-96zM576 512h-128v-128h128v128zM800 832c-52.992 0-96-43.008-96-96 0-7.744 0-19.456 0-32 0-29.632 0-64 0-64s69.056 0 96 0c52.992 0 96 42.944 96 96 0 52.992-43.008 96-96 96z" />
|
||||||
|
<glyph unicode="" d="M801.984 406.4c-28.672 17.664-65.408 7.232-81.92-23.36-0.576-1.024-0.576-2.24-1.152-3.264l-1.472 0.96c-41.984-74.432-117.696-124.736-205.184-124.736s-163.136 50.304-205.184 124.736l-1.408-0.832c-0.704 1.6-0.704 3.456-1.6 5.12-16.576 30.528-53.312 41.024-82.048 23.36s-38.528-56.832-21.952-87.36c1.28-2.24 3.264-3.648 4.672-5.696l-1.088-0.704c53.12-94.208 143.104-161.6 248.576-180.608v-70.016h-120.064c-33.152 0-60.032-28.672-60.032-64 0-35.392 26.88-64 60.032-64h360.128c33.216 0 60.032 28.608 60.032 64 0 35.328-26.816 64-60.032 64h-120v69.952c105.472 19.008 195.456 86.528 248.576 180.672l-0.384 0.256c1.088 1.472 2.624 2.432 3.456 4.096 16.64 30.656 6.784 69.76-21.952 87.424zM512.256 320c99.456 0 180.032 85.952 180.032 192v256c0 106.048-80.64 192-180.032 192-99.456 0-180.096-85.952-180.096-192v-256c0-106.048 80.64-192 180.096-192z" />
|
||||||
|
<glyph unicode="" d="M948.544 446.848c100.48 102.784 100.352 269.312 0 372.032-51.392 52.48-118.976 78.144-186.24 76.992-94.144-1.536-249.344-128.96-249.344-128.96s-159.616 129.216-256 129.088c-65.728-0.128-131.392-25.856-181.504-77.056-100.416-102.784-100.48-269.248 0-372.032l436.544-446.336 436.544 446.272z" />
|
||||||
|
(此处为该 SVG 图标字体文件的其余 <glyph> 路径数据,从略)
</font></defs></svg>
之后 宽度: | 高度: | 大小: 56 KiB |
二进制文件未显示。
二进制文件未显示。
13 docs/waifu_plugin/jquery-ui.min.js vendored 普通文件
文件差异因一行或多行过长而隐藏
4 docs/waifu_plugin/jquery.min.js vendored 普通文件
文件差异因一行或多行过长而隐藏
4238 docs/waifu_plugin/live2d.js 普通文件
文件差异内容过多而无法显示
1 docs/waifu_plugin/source 普通文件
@@ -0,0 +1 @@
https://github.com/fghrsh/live2d_demo
373 docs/waifu_plugin/waifu-tips.js 普通文件
@@ -0,0 +1,373 @@
window.live2d_settings = Array(); /*

   く__,.ヘヽ. / ,ー、 〉
   \ ', !-─‐-i / /´
   /`ー' L//`ヽ、 Live2D 看板娘 参数设置
   / /, /| , , ', Version 1.4.2
   イ / /-‐/ i L_ ハ ヽ! i Update 2018.11.12
   レ ヘ 7イ`ト レ'ァ-ト、!ハ| |
   !,/7 '0' ´0iソ| |
   |.从" _ ,,,, / |./ | 网页添加 Live2D 看板娘
   レ'| i>.、,,__ _,.イ / .i | https://www.fghrsh.net/post/123.html
   レ'| | / k_7_/レ'ヽ, ハ. |
   | |/i 〈|/ i ,.ヘ | i | Thanks
   .|/ / i: ヘ! \ | journey-ad / https://github.com/journey-ad/live2d_src
   kヽ>、ハ _,.ヘ、 /、! xiazeyu / https://github.com/xiazeyu/live2d-widget.js
   !'〈//`T´', \ `'7'ーr' Live2d Cubism SDK WebGL 2.1 Project & All model authors.
   レ'ヽL__|___i,___,ンレ|ノ
   ト-,/ |___./
   'ー' !_,.:*********************************************************************************/

// 后端接口
live2d_settings['modelAPI'] = '//live2d.fghrsh.net/api/'; // 自建 API 修改这里
live2d_settings['tipsMessage'] = 'waifu-tips.json'; // 同目录下可省略路径
live2d_settings['hitokotoAPI'] = 'lwl12.com'; // 一言 API,可选 'lwl12.com', 'hitokoto.cn', 'jinrishici.com'(古诗词)

// 默认模型
live2d_settings['modelId'] = 1; // 默认模型 ID,可在 F12 控制台找到
live2d_settings['modelTexturesId'] = 53; // 默认材质 ID,可在 F12 控制台找到

// 工具栏设置
live2d_settings['showToolMenu'] = true; // 显示 工具栏 ,可选 true(真), false(假)
live2d_settings['canCloseLive2d'] = true; // 显示 关闭看板娘 按钮,可选 true(真), false(假)
live2d_settings['canSwitchModel'] = true; // 显示 模型切换 按钮,可选 true(真), false(假)
live2d_settings['canSwitchTextures'] = true; // 显示 材质切换 按钮,可选 true(真), false(假)
live2d_settings['canSwitchHitokoto'] = true; // 显示 一言切换 按钮,可选 true(真), false(假)
live2d_settings['canTakeScreenshot'] = true; // 显示 看板娘截图 按钮,可选 true(真), false(假)
live2d_settings['canTurnToHomePage'] = true; // 显示 返回首页 按钮,可选 true(真), false(假)
live2d_settings['canTurnToAboutPage'] = true; // 显示 跳转关于页 按钮,可选 true(真), false(假)

// 模型切换模式
live2d_settings['modelStorage'] = true; // 记录 ID (刷新后恢复),可选 true(真), false(假)
live2d_settings['modelRandMode'] = 'switch'; // 模型切换,可选 'rand'(随机), 'switch'(顺序)
live2d_settings['modelTexturesRandMode']= 'rand'; // 材质切换,可选 'rand'(随机), 'switch'(顺序)

// 提示消息选项
live2d_settings['showHitokoto'] = true; // 显示一言
live2d_settings['showF12Status'] = true; // 显示加载状态
live2d_settings['showF12Message'] = false; // 显示看板娘消息
live2d_settings['showF12OpenMsg'] = true; // 显示控制台打开提示
live2d_settings['showCopyMessage'] = true; // 显示 复制内容 提示
live2d_settings['showWelcomeMessage'] = true; // 显示进入页面欢迎词

// 看板娘样式设置
live2d_settings['waifuSize'] = '280x250'; // 看板娘大小,例如 '280x250', '600x535'
live2d_settings['waifuTipsSize'] = '250x70'; // 提示框大小,例如 '250x70', '570x150'
live2d_settings['waifuFontSize'] = '12px'; // 提示框字体,例如 '12px', '30px'
live2d_settings['waifuToolFont'] = '14px'; // 工具栏字体,例如 '14px', '36px'
live2d_settings['waifuToolLine'] = '20px'; // 工具栏行高,例如 '20px', '36px'
live2d_settings['waifuToolTop'] = '0px'; // 工具栏顶部边距,例如 '0px', '-60px'
live2d_settings['waifuMinWidth'] = '768px'; // 页面小于 指定宽度 隐藏看板娘,例如 'disable'(禁用), '768px'
live2d_settings['waifuEdgeSide'] = 'left:0'; // 看板娘贴边方向,例如 'left:0'(靠左 0px), 'right:30'(靠右 30px)
live2d_settings['waifuDraggable'] = 'disable'; // 拖拽样式,例如 'disable'(禁用), 'axis-x'(只能水平拖拽), 'unlimited'(自由拖拽)
live2d_settings['waifuDraggableRevert'] = true; // 松开鼠标还原拖拽位置,可选 true(真), false(假)

// 其他杂项设置
live2d_settings['l2dVersion'] = '1.4.2'; // 当前版本
live2d_settings['l2dVerDate'] = '2018.11.12'; // 版本更新日期
live2d_settings['homePageUrl'] = 'auto'; // 主页地址,可选 'auto'(自动), '{URL 网址}'
live2d_settings['aboutPageUrl'] = 'https://www.fghrsh.net/post/123.html'; // 关于页地址, '{URL 网址}'
live2d_settings['screenshotCaptureName']= 'live2d.png'; // 看板娘截图文件名,例如 'live2d.png'

/****************************************************************************************************/

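// 补充示例(非 waifu-tips.js 原有代码,其中的地址与取值均为假设,仅作说明):
// 上面的配置块就是本脚本的全部可调参数;若要修改默认行为,可以直接编辑上面的赋值,
// 也可以在 waifu-tips.js 加载之后、调用 initModel() 之前覆盖对应键,例如:
//     live2d_settings['modelAPI'] = 'https://example.com/live2d/api/'; // 改用自建 API(假设地址)
//     live2d_settings['canTakeScreenshot'] = false;                    // 隐藏“看板娘截图”按钮
//     live2d_settings['waifuEdgeSide'] = 'right:30';                   // 改为靠右 30px 显示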
String.prototype.render = function(context) {
    var tokenReg = /(\\)?\{([^\{\}\\]+)(\\)?\}/g;

    return this.replace(tokenReg, function (word, slash1, token, slash2) {
        if (slash1 || slash2) { return word.replace('\\', ''); }

        var variables = token.replace(/\s/g, '').split('.');
        var currentObject = context;
        var i, length, variable;

        for (i = 0, length = variables.length; i < length; ++i) {
            variable = variables[i];
            currentObject = currentObject[variable];
            if (currentObject === undefined || currentObject === null) return '';
        }
        return currentObject;
    });
};
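// 补充说明(非 waifu-tips.js 原有代码,仅作说明):render 用 {a.b} 形式的占位符做简易模板替换,
// 占位路径中任一级为 undefined/null 时返回空字符串。用法示意:
//     '要看看 {text} 么?'.render({ text: '首页' })            // => '要看看 首页 么?'
//     '{origin.title}'.render({ origin: { title: '静夜思' } })  // => '静夜思'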

var re = /x/;
console.log(re);

function empty(obj) {return typeof obj=="undefined"||obj==null||obj==""?true:false}
function getRandText(text) {return Array.isArray(text) ? text[Math.floor(Math.random() * text.length + 1)-1] : text}
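// 补充说明(非 waifu-tips.js 原有代码):empty() 把 undefined/null/空字符串统一判为“空”;
// getRandText() 对数组随机取一项、对字符串原样返回,例如 getRandText(['A', 'B']) 随机得到 'A' 或 'B'。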

function showMessage(text, timeout, flag) {
    if(flag || sessionStorage.getItem('waifu-text') === '' || sessionStorage.getItem('waifu-text') === null){
        if(Array.isArray(text)) text = text[Math.floor(Math.random() * text.length + 1)-1];
        if (live2d_settings.showF12Message) console.log('[Message]', text.replace(/<[^<>]+>/g,''));

        if(flag) sessionStorage.setItem('waifu-text', text);

        $('.waifu-tips').stop();
        $('.waifu-tips').html(text).fadeTo(200, 1);
        if (timeout === undefined) timeout = 5000;
        hideMessage(timeout);
    }
}

function hideMessage(timeout) {
    $('.waifu-tips').stop().css('opacity',1);
    if (timeout === undefined) timeout = 5000;
    window.setTimeout(function() {sessionStorage.removeItem('waifu-text')}, timeout);
    $('.waifu-tips').delay(timeout).fadeTo(200, 0);
}
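// 补充说明(非 waifu-tips.js 原有代码):showMessage(text, timeout, flag) 中 text 可为字符串或数组(数组时随机取一条),
// timeout 为显示毫秒数(缺省 5000);flag 为 true 时会把文本写入 sessionStorage('waifu-text'),
// 在其显示期间压过普通提示。用法示意:
//     showMessage('欢迎回来!', 4000, true);
//     showMessage(['提示一', '提示二'], 3000);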

function initModel(waifuPath, type) {
    /* console welcome message */
eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));

    /* 判断 JQuery */
    if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');

    /* 加载看板娘样式 */
    live2d_settings.waifuSize = live2d_settings.waifuSize.split('x');
    live2d_settings.waifuTipsSize = live2d_settings.waifuTipsSize.split('x');
    live2d_settings.waifuEdgeSide = live2d_settings.waifuEdgeSide.split(':');

    $("#live2d").attr("width",live2d_settings.waifuSize[0]);
    $("#live2d").attr("height",live2d_settings.waifuSize[1]);
    $(".waifu-tips").width(live2d_settings.waifuTipsSize[0]);
    $(".waifu-tips").height(live2d_settings.waifuTipsSize[1]);
    $(".waifu-tips").css("top",live2d_settings.waifuToolTop);
    $(".waifu-tips").css("font-size",live2d_settings.waifuFontSize);
    $(".waifu-tool").css("font-size",live2d_settings.waifuToolFont);
    $(".waifu-tool span").css("line-height",live2d_settings.waifuToolLine);

    if (live2d_settings.waifuEdgeSide[0] == 'left') $(".waifu").css("left",live2d_settings.waifuEdgeSide[1]+'px');
    else if (live2d_settings.waifuEdgeSide[0] == 'right') $(".waifu").css("right",live2d_settings.waifuEdgeSide[1]+'px');

    window.waifuResize = function() { $(window).width() <= Number(live2d_settings.waifuMinWidth.replace('px','')) ? $(".waifu").hide() : $(".waifu").show(); };
    if (live2d_settings.waifuMinWidth != 'disable') { waifuResize(); $(window).resize(function() {waifuResize()}); }

    try {
        if (live2d_settings.waifuDraggable == 'axis-x') $(".waifu").draggable({ axis: "x", revert: live2d_settings.waifuDraggableRevert });
        else if (live2d_settings.waifuDraggable == 'unlimited') $(".waifu").draggable({ revert: live2d_settings.waifuDraggableRevert });
        else $(".waifu").css("transition", 'all .3s ease-in-out');
    } catch(err) { console.log('[Error] JQuery UI is not defined.') }

    live2d_settings.homePageUrl = live2d_settings.homePageUrl == 'auto' ? window.location.protocol+'//'+window.location.hostname+'/' : live2d_settings.homePageUrl;
    if (window.location.protocol == 'file:' && live2d_settings.modelAPI.substr(0,2) == '//') live2d_settings.modelAPI = 'http:'+live2d_settings.modelAPI;

    $('.waifu-tool .fui-home').click(function (){
        //window.location = 'https://www.fghrsh.net/';
        window.location = live2d_settings.homePageUrl;
    });

    $('.waifu-tool .fui-info-circle').click(function (){
        //window.open('https://imjad.cn/archives/lab/add-dynamic-poster-girl-with-live2d-to-your-blog-02');
        window.open(live2d_settings.aboutPageUrl);
    });

    if (typeof(waifuPath) == "object") loadTipsMessage(waifuPath); else {
        $.ajax({
            cache: true,
            url: waifuPath == '' ? live2d_settings.tipsMessage : (waifuPath.substr(waifuPath.length-15)=='waifu-tips.json'?waifuPath:waifuPath+'waifu-tips.json'),
            dataType: "json",
            success: function (result){ loadTipsMessage(result); }
        });
    }

    if (!live2d_settings.showToolMenu) $('.waifu-tool').hide();
    if (!live2d_settings.canCloseLive2d) $('.waifu-tool .fui-cross').hide();
    if (!live2d_settings.canSwitchModel) $('.waifu-tool .fui-eye').hide();
    if (!live2d_settings.canSwitchTextures) $('.waifu-tool .fui-user').hide();
    if (!live2d_settings.canSwitchHitokoto) $('.waifu-tool .fui-chat').hide();
    if (!live2d_settings.canTakeScreenshot) $('.waifu-tool .fui-photo').hide();
    if (!live2d_settings.canTurnToHomePage) $('.waifu-tool .fui-home').hide();
    if (!live2d_settings.canTurnToAboutPage) $('.waifu-tool .fui-info-circle').hide();

    if (waifuPath === undefined) waifuPath = '';
    var modelId = localStorage.getItem('modelId');
    var modelTexturesId = localStorage.getItem('modelTexturesId');

    if (!live2d_settings.modelStorage || modelId == null) {
        var modelId = live2d_settings.modelId;
        var modelTexturesId = live2d_settings.modelTexturesId;
    } loadModel(modelId, modelTexturesId);
}

function loadModel(modelId, modelTexturesId=0) {
    if (live2d_settings.modelStorage) {
        localStorage.setItem('modelId', modelId);
        localStorage.setItem('modelTexturesId', modelTexturesId);
    } else {
        sessionStorage.setItem('modelId', modelId);
        sessionStorage.setItem('modelTexturesId', modelTexturesId);
    } loadlive2d('live2d', live2d_settings.modelAPI+'get/?id='+modelId+'-'+modelTexturesId, (live2d_settings.showF12Status ? console.log('[Status]','live2d','模型',modelId+'-'+modelTexturesId,'加载完成'):null));
}

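// 补充说明(非 waifu-tips.js 原有代码):loadModel 先把选定的模型/材质 ID 记到 localStorage(或 sessionStorage),
// 再调用 loadlive2d() 向模型 API 发起请求,请求地址形如:
//     live2d_settings.modelAPI + 'get/?id=' + modelId + '-' + modelTexturesId
//     // 例:按上方默认配置即为 '//live2d.fghrsh.net/api/get/?id=1-53'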
function loadTipsMessage(result) {
    window.waifu_tips = result;

    $.each(result.mouseover, function (index, tips){
        $(document).on("mouseover", tips.selector, function (){
            var text = getRandText(tips.text);
            text = text.render({text: $(this).text()});
            showMessage(text, 3000);
        });
    });
    $.each(result.click, function (index, tips){
        $(document).on("click", tips.selector, function (){
            var text = getRandText(tips.text);
            text = text.render({text: $(this).text()});
            showMessage(text, 3000, true);
        });
    });
    $.each(result.seasons, function (index, tips){
        var now = new Date();
        var after = tips.date.split('-')[0];
        var before = tips.date.split('-')[1] || after;

        if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) &&
           (after.split('/')[1] <= now.getDate() && now.getDate() <= before.split('/')[1])){
            var text = getRandText(tips.text);
            text = text.render({year: now.getFullYear()});
            showMessage(text, 6000, true);
        }
    });

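    // 补充说明(非 waifu-tips.js 原有代码,日期取值为假设示例):seasons 条目的 date 字段格式为
    // "起始月/日" 或 "起始月/日-结束月/日",命中当前日期区间时随机展示一条 text,并可用 {year} 占位当前年份,例如:
    //     { "date": "01/01-01/10", "text": ["{year} 年,新年快乐!"] }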
    if (live2d_settings.showF12OpenMsg) {
        re.toString = function() {
            showMessage(getRandText(result.waifu.console_open_msg), 5000, true);
            return '';
        };
    }

    if (live2d_settings.showCopyMessage) {
        $(document).on('copy', function() {
            showMessage(getRandText(result.waifu.copy_message), 5000, true);
        });
    }

    $('.waifu-tool .fui-photo').click(function(){
        showMessage(getRandText(result.waifu.screenshot_message), 5000, true);
        window.Live2D.captureName = live2d_settings.screenshotCaptureName;
        window.Live2D.captureFrame = true;
    });

    $('.waifu-tool .fui-cross').click(function(){
        sessionStorage.setItem('waifu-dsiplay', 'none');
        showMessage(getRandText(result.waifu.hidden_message), 1300, true);
        window.setTimeout(function() {$('.waifu').hide();}, 1300);
    });

    window.showWelcomeMessage = function(result) {
        showMessage('欢迎使用GPT-Academic', 6000);
    }; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result);

    var waifu_tips = result.waifu;

    function loadOtherModel() {
        var modelId = modelStorageGetItem('modelId');
        var modelRandMode = live2d_settings.modelRandMode;

        $.ajax({
            cache: modelRandMode == 'switch' ? true : false,
            url: live2d_settings.modelAPI+modelRandMode+'/?id='+modelId,
            dataType: "json",
            success: function(result) {
                loadModel(result.model['id']);
                var message = result.model['message'];
                $.each(waifu_tips.model_message, function(i,val) {if (i==result.model['id']) message = getRandText(val)});
                showMessage(message, 3000, true);
            }
        });
    }

    function loadRandTextures() {
        var modelId = modelStorageGetItem('modelId');
        var modelTexturesId = modelStorageGetItem('modelTexturesId');
        var modelTexturesRandMode = live2d_settings.modelTexturesRandMode;

        $.ajax({
            cache: modelTexturesRandMode == 'switch' ? true : false,
            url: live2d_settings.modelAPI+modelTexturesRandMode+'_textures/?id='+modelId+'-'+modelTexturesId,
            dataType: "json",
            success: function(result) {
                if (result.textures['id'] == 1 && (modelTexturesId == 1 || modelTexturesId == 0))
                    showMessage(waifu_tips.load_rand_textures[0], 3000, true);
                else showMessage(waifu_tips.load_rand_textures[1], 3000, true);
                loadModel(modelId, result.textures['id']);
            }
        });
    }

    function modelStorageGetItem(key) { return live2d_settings.modelStorage ? localStorage.getItem(key) : sessionStorage.getItem(key); }

    /* 检测用户活动状态,并在空闲时显示一言 */
    if (live2d_settings.showHitokoto) {
        window.getActed = false; window.hitokotoTimer = 0; window.hitokotoInterval = false;
        $(document).mousemove(function(e){getActed = true;}).keydown(function(){getActed = true;});
        setInterval(function(){ if (!getActed) ifActed(); else elseActed(); }, 1000);
    }

    function ifActed() {
        if (!hitokotoInterval) {
            hitokotoInterval = true;
            hitokotoTimer = window.setInterval(showHitokotoActed, 30000);
        }
    }

    function elseActed() {
        getActed = hitokotoInterval = false;
        window.clearInterval(hitokotoTimer);
    }

    function showHitokotoActed() {
        if ($(document)[0].visibilityState == 'visible') showHitokoto();
    }

    function showHitokoto() {
        switch(live2d_settings.hitokotoAPI) {
            case 'lwl12.com':
                $.getJSON('https://api.lwl12.com/hitokoto/v1?encode=realjson',function(result){
                    if (!empty(result.source)) {
                        var text = waifu_tips.hitokoto_api_message['lwl12.com'][0];
                        if (!empty(result.author)) text += waifu_tips.hitokoto_api_message['lwl12.com'][1];
                        text = text.render({source: result.source, creator: result.author});
                        window.setTimeout(function() {showMessage(text+waifu_tips.hitokoto_api_message['lwl12.com'][2], 3000, true);}, 5000);
                    } showMessage(result.text, 5000, true);
                });break;
            case 'fghrsh.net':
                $.getJSON('https://api.fghrsh.net/hitokoto/rand/?encode=jsc&uid=3335',function(result){
                    if (!empty(result.source)) {
                        var text = waifu_tips.hitokoto_api_message['fghrsh.net'][0];
                        text = text.render({source: result.source, date: result.date});
                        window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
                        showMessage(result.hitokoto, 5000, true);
                    }
                });break;
            case 'jinrishici.com':
                $.ajax({
                    url: 'https://v2.jinrishici.com/one.json',
                    xhrFields: {withCredentials: true},
                    success: function (result, status) {
                        if (!empty(result.data.origin.title)) {
                            var text = waifu_tips.hitokoto_api_message['jinrishici.com'][0];
                            text = text.render({title: result.data.origin.title, dynasty: result.data.origin.dynasty, author:result.data.origin.author});
                            window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
                        } showMessage(result.data.content, 5000, true);
                    }
                });break;
            default:
                $.getJSON('https://v1.hitokoto.cn',function(result){
                    if (!empty(result.from)) {
                        var text = waifu_tips.hitokoto_api_message['hitokoto.cn'][0];
                        text = text.render({source: result.from, creator: result.creator});
                        window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
                    }
                    showMessage(result.hitokoto, 5000, true);
                });
        }
    }

    $('.waifu-tool .fui-eye').click(function (){loadOtherModel()});
    $('.waifu-tool .fui-user').click(function (){loadRandTextures()});
    $('.waifu-tool .fui-chat').click(function (){showHitokoto()});
}
114 docs/waifu_plugin/waifu-tips.json 普通文件
@@ -0,0 +1,114 @@
{
    "waifu": {
        "console_open_msg": ["哈哈,你打开了控制台,是想要看看我的秘密吗?"],
        "copy_message": ["你都复制了些什么呀,转载要记得加上出处哦"],
        "screenshot_message": ["照好了嘛,是不是很可爱呢?"],
        "hidden_message": ["我们还能再见面的吧…"],
        "load_rand_textures": ["我还没有其他衣服呢", "我的新衣服好看嘛"],
        "hour_tips": {
            "t0-5": ["快睡觉去吧,年纪轻轻小心猝死哦"],
            "t5-7": ["早上好!一日之计在于晨,美好的一天就要开始了"],
            "t7-11": ["上午好!工作顺利嘛,不要久坐,多起来走动走动哦!"],
            "t11-14": ["中午了,工作了一个上午,现在是午餐时间!"],
            "t14-17": ["午后很容易犯困呢,今天的运动目标完成了吗?"],
            "t17-19": ["傍晚了!窗外夕阳的景色很美丽呢,最美不过夕阳红~"],
            "t19-21": ["晚上好,今天过得怎么样?"],
            "t21-23": ["已经这么晚了呀,早点休息吧,晚安~"],
            "t23-24": ["你是夜猫子呀?这么晚还不睡觉,明天起的来嘛"],
            "default": ["嗨~ 快来逗我玩吧!"]
        },
        "referrer_message": {
            "localhost": ["欢迎使用<span style=\"color:rgba(245, 20, 20, 0.62);\">『ChatGPT", "』</span>", " - "],
            "baidu": ["Hello! 来自 百度搜索 的朋友<br>你是搜索 <span style=\"color:rgba(245, 20, 20, 0.62);\">", "</span> 找到的我吗?"],
            "so": ["Hello! 来自 360搜索 的朋友<br>你是搜索 <span style=\"color:rgba(245, 20, 20, 0.62);\">", "</span> 找到的我吗?"],
            "google": ["Hello! 来自 谷歌搜索 的朋友<br>欢迎使用<span style=\"color:rgba(245, 20, 20, 0.62);\">『ChatGPT", "』</span>", " - "],
            "default": ["Hello! 来自 <span style=\"color:rgba(245, 20, 20, 0.62);\">", "</span> 的朋友"],
            "none": ["欢迎使用<span style=\"color:rgba(245, 20, 20, 0.62);\">『ChatGPT", "』</span>", " - "]
        },
        "referrer_hostname": {
            "example.com": ["示例网站"],
            "www.fghrsh.net": ["FGHRSH 的博客"]
        },
        "model_message": {
            "1": ["来自 Potion Maker 的 Pio 酱 ~"],
            "2": ["来自 Potion Maker 的 Tia 酱 ~"]
        },
        "hitokoto_api_message": {
            "lwl12.com": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>", ",是 <span style=\"color:#0099cc;\">{creator}</span> 投稿的", "。"],
            "fghrsh.net": ["这句一言出处是 <span style=\"color:#0099cc;\">『{source}』</span>,是 <span style=\"color:#0099cc;\">FGHRSH</span> 在 {date} 收藏的!"],
            "jinrishici.com": ["这句诗词出自 <span style=\"color:#0099cc;\">《{title}》</span>,是 {dynasty}诗人 {author} 创作的!"],
            "hitokoto.cn": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>,是 <span style=\"color:#0099cc;\">{creator}</span> 在 hitokoto.cn 投稿的。"]
        }
    },
|
||||||
|
"mouseover": [
|
||||||
|
{ "selector": ".container a[href^='http']", "text": ["要看看 <span style=\"color:#0099cc;\">{text}</span> 么?"] },
|
||||||
|
{ "selector": ".fui-home", "text": ["点击前往首页,想回到上一页可以使用浏览器的后退功能哦"] },
|
||||||
|
{ "selector": ".fui-chat", "text": ["一言一语,一颦一笑。一字一句,一颗赛艇。"] },
|
||||||
|
{ "selector": ".fui-eye", "text": ["嗯··· 要切换 看板娘 吗?"] },
|
||||||
|
{ "selector": ".fui-user", "text": ["喜欢换装 Play 吗?"] },
|
||||||
|
{ "selector": ".fui-photo", "text": ["要拍张纪念照片吗?"] },
|
||||||
|
{ "selector": ".fui-info-circle", "text": ["这里有关于我的信息呢"] },
|
||||||
|
{ "selector": ".fui-cross", "text": ["你不喜欢我了吗..."] },
|
||||||
|
{ "selector": "#tor_show", "text": ["翻页比较麻烦吗,点击可以显示这篇文章的目录呢"] },
|
||||||
|
{ "selector": "#comment_go", "text": ["想要去评论些什么吗?"] },
|
||||||
|
{ "selector": "#night_mode", "text": ["深夜时要爱护眼睛呀"] },
|
||||||
|
{ "selector": "#qrcode", "text": ["手机扫一下就能继续看,很方便呢"] },
|
||||||
|
{ "selector": ".comment_reply", "text": ["要吐槽些什么呢"] },
|
||||||
|
{ "selector": "#back-to-top", "text": ["回到开始的地方吧"] },
|
||||||
|
{ "selector": "#author", "text": ["该怎么称呼你呢"] },
|
||||||
|
{ "selector": "#mail", "text": ["留下你的邮箱,不然就是无头像人士了"] },
|
||||||
|
{ "selector": "#url", "text": ["你的家在哪里呢,好让我去参观参观"] },
|
||||||
|
{ "selector": "#textarea", "text": ["认真填写哦,垃圾评论是禁止事项"] },
|
||||||
|
{ "selector": ".OwO-logo", "text": ["要插入一个表情吗"] },
|
||||||
|
{ "selector": "#csubmit", "text": ["要[提交]^(Commit)了吗,首次评论需要审核,请耐心等待~"] },
|
||||||
|
{ "selector": ".ImageBox", "text": ["点击图片可以放大呢"] },
|
||||||
|
{ "selector": "input[name=s]", "text": ["找不到想看的内容?搜索看看吧"] },
|
||||||
|
{ "selector": ".previous", "text": ["去上一页看看吧"] },
|
||||||
|
{ "selector": ".next", "text": ["去下一页看看吧"] },
|
||||||
|
{ "selector": ".dropdown-toggle", "text": ["这里是菜单"] },
|
||||||
|
{ "selector": "c-player a.play-icon", "text": ["想要听点音乐吗"] },
|
||||||
|
{ "selector": "c-player div.time", "text": ["在这里可以调整<span style=\"color:#0099cc;\">播放进度</span>呢"] },
|
||||||
|
{ "selector": "c-player div.volume", "text": ["在这里可以调整<span style=\"color:#0099cc;\">音量</span>呢"] },
|
||||||
|
{ "selector": "c-player div.list-button", "text": ["<span style=\"color:#0099cc;\">播放列表</span>里都有什么呢"] },
|
||||||
|
{ "selector": "c-player div.lyric-button", "text": ["有<span style=\"color:#0099cc;\">歌词</span>的话就能跟着一起唱呢"] },
|
||||||
|
{ "selector": ".waifu #live2d", "text": [
|
||||||
|
"别玩了,快去学习!",
|
||||||
|
"偶尔放松下眼睛吧。",
|
||||||
|
"看什么看(*^▽^*)",
|
||||||
|
"焦虑时,吃顿大餐心情就好啦^_^",
|
||||||
|
"你这个年纪,怎么睡得着觉的你^_^",
|
||||||
|
"修改ADD_WAIFU=False,我就不再打扰你了~",
|
||||||
|
"经常去github看看我们的更新吧,也许有好玩的新功能呢。",
|
||||||
|
"试试本地大模型吧,有的也很强大的哦。",
|
||||||
|
"很多强大的函数插件隐藏在下拉菜单中呢。",
|
||||||
|
"红色的插件,使用之前需要把文件上传进去哦。",
|
||||||
|
"想添加功能按钮吗?读读readme很容易就学会啦。",
|
||||||
|
"敏感或机密的信息,不可以问AI的哦!",
|
||||||
|
"LLM究竟是划时代的创新,还是扼杀创造力的毒药呢?"
|
||||||
|
] }
|
||||||
|
],
|
||||||
|
"click": [
|
||||||
|
{
|
||||||
|
"selector": ".waifu #live2d",
|
||||||
|
"text": [
|
||||||
|
"是…是不小心碰到了吧",
|
||||||
|
"再摸的话我可要报警了!⌇●﹏●⌇",
|
||||||
|
"110吗,这里有个变态一直在摸我(ó﹏ò。)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"seasons": [
|
||||||
|
{ "date": "01/01", "text": ["<span style=\"color:#0099cc;\">元旦</span>了呢,新的一年又开始了,今年是{year}年~"] },
|
||||||
|
{ "date": "02/14", "text": ["又是一年<span style=\"color:#0099cc;\">情人节</span>,{year}年找到对象了嘛~"] },
|
||||||
|
{ "date": "03/08", "text": ["今天是<span style=\"color:#0099cc;\">妇女节</span>!"] },
|
||||||
|
{ "date": "03/12", "text": ["今天是<span style=\"color:#0099cc;\">植树节</span>,要保护环境呀"] },
|
||||||
|
{ "date": "04/01", "text": ["悄悄告诉你一个秘密~<span style=\"background-color:#34495e;\">今天是愚人节,不要被骗了哦~</span>"] },
|
||||||
|
{ "date": "05/01", "text": ["今天是<span style=\"color:#0099cc;\">五一劳动节</span>,计划好假期去哪里了吗~"] },
|
||||||
|
{ "date": "06/01", "text": ["<span style=\"color:#0099cc;\">儿童节</span>了呢,快活的时光总是短暂,要是永远长不大该多好啊…"] },
|
||||||
|
{ "date": "09/03", "text": ["<span style=\"color:#0099cc;\">中国人民抗日战争胜利纪念日</span>,铭记历史、缅怀先烈、珍爱和平、开创未来。"] },
|
||||||
|
{ "date": "09/10", "text": ["<span style=\"color:#0099cc;\">教师节</span>,在学校要给老师问声好呀~"] },
|
||||||
|
{ "date": "10/01", "text": ["<span style=\"color:#0099cc;\">国庆节</span>,新中国已经成立69年了呢"] },
|
||||||
|
{ "date": "11/05-11/12", "text": ["今年的<span style=\"color:#0099cc;\">双十一</span>是和谁一起过的呢~"] },
|
||||||
|
{ "date": "12/20-12/31", "text": ["这几天是<span style=\"color:#0099cc;\">圣诞节</span>,主人肯定又去剁手买买买了~"] }
|
||||||
|
]
|
||||||
|
}
|
||||||
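The `hitokoto_api_message` entries above are plain format templates: the JavaScript shown earlier fills the `{source}`, `{creator}`, `{title}` and similar placeholders with fields returned by the corresponding hitokoto API. Below is a minimal sketch of that substitution step, written in Python purely for illustration; the `render` helper name is hypothetical (the real project does this in waifu-tips.js via `text.render({...})`), and the HTML span markup from the JSON template is omitted here.

```python
# Minimal illustrative sketch: fill a hitokoto_api_message template with API fields.
# The template mirrors the "hitokoto.cn" entry above (HTML markup omitted);
# `result` mimics the shape of a v1.hitokoto.cn response used by the JS code.
def render(template: str, **fields) -> str:
    # Same idea as the JS text.render({...}) call: replace {name} placeholders.
    for key, value in fields.items():
        template = template.replace("{" + key + "}", str(value))
    return template

template = "这句一言来自 『{source}』,是 {creator} 在 hitokoto.cn 投稿的。"
result = {"hitokoto": "示例一言。", "from": "某本书", "creator": "某位投稿者"}

print(result["hitokoto"])                                    # the quote itself
print(render(template, source=result["from"], creator=result["creator"]))  # the attribution bubble
```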
docs/waifu_plugin/waifu.css · 290 · 普通文件
@@ -0,0 +1,290 @@
.waifu {
    position: fixed;
    bottom: 0;
    z-index: 1;
    font-size: 0;
    -webkit-transform: translateY(3px);
    transform: translateY(3px);
}
.waifu:hover {
    -webkit-transform: translateY(0);
    transform: translateY(0);
}
.waifu-tips {
    opacity: 0;
    margin: -20px 20px;
    padding: 5px 10px;
    border: 1px solid rgba(224, 186, 140, 0.62);
    border-radius: 12px;
    background-color: rgba(236, 217, 188, 0.5);
    box-shadow: 0 3px 15px 2px rgba(191, 158, 118, 0.2);
    text-overflow: ellipsis;
    overflow: hidden;
    position: absolute;
    animation-delay: 5s;
    animation-duration: 50s;
    animation-iteration-count: infinite;
    animation-name: shake;
    animation-timing-function: ease-in-out;
}
.waifu-tool {
    display: none;
    color: #aaa;
    top: 50px;
    right: 10px;
    position: absolute;
}
.waifu:hover .waifu-tool {
    display: block;
}
.waifu-tool span {
    display: block;
    cursor: pointer;
    color: #5b6c7d;
    transition: 0.2s;
}
.waifu-tool span:hover {
    color: #34495e;
}
.waifu #live2d {
    position: relative;
}

@keyframes shake {
    2%  { transform: translate(0.5px, -1.5px) rotate(-0.5deg); }
    4%  { transform: translate(0.5px, 1.5px) rotate(1.5deg); }
    6%  { transform: translate(1.5px, 1.5px) rotate(1.5deg); }
    8%  { transform: translate(2.5px, 1.5px) rotate(0.5deg); }
    10% { transform: translate(0.5px, 2.5px) rotate(0.5deg); }
    12% { transform: translate(1.5px, 1.5px) rotate(0.5deg); }
    14% { transform: translate(0.5px, 0.5px) rotate(0.5deg); }
    16% { transform: translate(-1.5px, -0.5px) rotate(1.5deg); }
    18% { transform: translate(0.5px, 0.5px) rotate(1.5deg); }
    20% { transform: translate(2.5px, 2.5px) rotate(1.5deg); }
    22% { transform: translate(0.5px, -1.5px) rotate(1.5deg); }
    24% { transform: translate(-1.5px, 1.5px) rotate(-0.5deg); }
    26% { transform: translate(1.5px, 0.5px) rotate(1.5deg); }
    28% { transform: translate(-0.5px, -0.5px) rotate(-0.5deg); }
    30% { transform: translate(1.5px, -0.5px) rotate(-0.5deg); }
    32% { transform: translate(2.5px, -1.5px) rotate(1.5deg); }
    34% { transform: translate(2.5px, 2.5px) rotate(-0.5deg); }
    36% { transform: translate(0.5px, -1.5px) rotate(0.5deg); }
    38% { transform: translate(2.5px, -0.5px) rotate(-0.5deg); }
    40% { transform: translate(-0.5px, 2.5px) rotate(0.5deg); }
    42% { transform: translate(-1.5px, 2.5px) rotate(0.5deg); }
    44% { transform: translate(-1.5px, 1.5px) rotate(0.5deg); }
    46% { transform: translate(1.5px, -0.5px) rotate(-0.5deg); }
    48% { transform: translate(2.5px, -0.5px) rotate(0.5deg); }
    50% { transform: translate(-1.5px, 1.5px) rotate(0.5deg); }
    52% { transform: translate(-0.5px, 1.5px) rotate(0.5deg); }
    54% { transform: translate(-1.5px, 1.5px) rotate(0.5deg); }
    56% { transform: translate(0.5px, 2.5px) rotate(1.5deg); }
    58% { transform: translate(2.5px, 2.5px) rotate(0.5deg); }
    60% { transform: translate(2.5px, -1.5px) rotate(1.5deg); }
    62% { transform: translate(-1.5px, 0.5px) rotate(1.5deg); }
    64% { transform: translate(-1.5px, 1.5px) rotate(1.5deg); }
    66% { transform: translate(0.5px, 2.5px) rotate(1.5deg); }
    68% { transform: translate(2.5px, -1.5px) rotate(1.5deg); }
    70% { transform: translate(2.5px, 2.5px) rotate(0.5deg); }
    72% { transform: translate(-0.5px, -1.5px) rotate(1.5deg); }
    74% { transform: translate(-1.5px, 2.5px) rotate(1.5deg); }
    76% { transform: translate(-1.5px, 2.5px) rotate(1.5deg); }
    78% { transform: translate(-1.5px, 2.5px) rotate(0.5deg); }
    80% { transform: translate(-1.5px, 0.5px) rotate(-0.5deg); }
    82% { transform: translate(-1.5px, 0.5px) rotate(-0.5deg); }
    84% { transform: translate(-0.5px, 0.5px) rotate(1.5deg); }
    86% { transform: translate(2.5px, 1.5px) rotate(0.5deg); }
    88% { transform: translate(-1.5px, 0.5px) rotate(1.5deg); }
    90% { transform: translate(-1.5px, -0.5px) rotate(-0.5deg); }
    92% { transform: translate(-1.5px, -1.5px) rotate(1.5deg); }
    94% { transform: translate(0.5px, 0.5px) rotate(-0.5deg); }
    96% { transform: translate(2.5px, -0.5px) rotate(-0.5deg); }
    98% { transform: translate(-1.5px, -1.5px) rotate(-0.5deg); }
    0%, 100% { transform: translate(0, 0) rotate(0); }
}
@font-face {
    font-family: 'Flat-UI-Icons';
    src: url('flat-ui-icons-regular.eot');
    src: url('flat-ui-icons-regular.eot?#iefix') format('embedded-opentype'), url('flat-ui-icons-regular.woff') format('woff'), url('flat-ui-icons-regular.ttf') format('truetype'), url('flat-ui-icons-regular.svg#flat-ui-icons-regular') format('svg');
}
[class^="fui-"],
[class*="fui-"] {
    font-family: 'Flat-UI-Icons';
    speak: none;
    font-style: normal;
    font-weight: normal;
    font-variant: normal;
    text-transform: none;
    -webkit-font-smoothing: antialiased;
    -moz-osx-font-smoothing: grayscale;
}
.fui-cross:before { content: "\e609"; }
.fui-info-circle:before { content: "\e60f"; }
.fui-photo:before { content: "\e62a"; }
.fui-eye:before { content: "\e62c"; }
.fui-chat:before { content: "\e62d"; }
.fui-home:before { content: "\e62e"; }
.fui-user:before { content: "\e631"; }
flagged/modeling_moss.py · 2952 · 普通文件
文件差异内容过多而无法显示
@@ -8,10 +8,10 @@
     具备多线程调用能力的函数:在函数插件中被调用,灵活而简洁
     2. predict_no_ui_long_connection(...)
 """
-import tiktoken, copy, re
+import tiktoken, copy
 from functools import lru_cache
 from concurrent.futures import ThreadPoolExecutor
-from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask, read_one_api_model_name
+from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask
 
 from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
 from .bridge_chatgpt import predict as chatgpt_ui
@@ -34,9 +34,6 @@ from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
 from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
 from .bridge_zhipu import predict as zhipu_ui
 
-from .bridge_cohere import predict as cohere_ui
-from .bridge_cohere import predict_no_ui_long_connection as cohere_noui
-
 colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
 
 class LazyloadTiktoken(object):
@@ -64,12 +61,6 @@ API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "A
 openai_endpoint = "https://api.openai.com/v1/chat/completions"
 api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
 newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
-gemini_endpoint = "https://generativelanguage.googleapis.com/v1beta/models"
-claude_endpoint = "https://api.anthropic.com/v1/messages"
-yimodel_endpoint = "https://api.lingyiwanwu.com/v1/chat/completions"
-cohere_endpoint = "https://api.cohere.ai/v1/chat"
-ollama_endpoint = "http://localhost:11434/api/chat"
-
 if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
 azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
 # 兼容旧版的配置
@@ -84,11 +75,7 @@ except:
 if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
 if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
 if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
-if gemini_endpoint in API_URL_REDIRECT: gemini_endpoint = API_URL_REDIRECT[gemini_endpoint]
-if claude_endpoint in API_URL_REDIRECT: claude_endpoint = API_URL_REDIRECT[claude_endpoint]
-if yimodel_endpoint in API_URL_REDIRECT: yimodel_endpoint = API_URL_REDIRECT[yimodel_endpoint]
-if cohere_endpoint in API_URL_REDIRECT: cohere_endpoint = API_URL_REDIRECT[cohere_endpoint]
-if ollama_endpoint in API_URL_REDIRECT: ollama_endpoint = API_URL_REDIRECT[ollama_endpoint]
 
 # 获取tokenizer
 tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
@@ -107,7 +94,7 @@ model_info = {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
-        "max_token": 16385,
+        "max_token": 4096,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
@@ -148,15 +135,6 @@ model_info = {
         "token_cnt": get_token_num_gpt35,
     },
 
-    "gpt-3.5-turbo-0125": { #16k
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": openai_endpoint,
-        "max_token": 16385,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
-
     "gpt-4": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
@@ -202,25 +180,6 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },
 
-    "gpt-4-turbo": {
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": openai_endpoint,
-        "max_token": 128000,
-        "tokenizer": tokenizer_gpt4,
-        "token_cnt": get_token_num_gpt4,
-    },
-
-    "gpt-4-turbo-2024-04-09": {
-        "fn_with_ui": chatgpt_ui,
-        "fn_without_ui": chatgpt_noui,
-        "endpoint": openai_endpoint,
-        "max_token": 128000,
-        "tokenizer": tokenizer_gpt4,
-        "token_cnt": get_token_num_gpt4,
-    },
-
     "gpt-3.5-random": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
@@ -268,14 +227,6 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
-    "glm-4v": {
-        "fn_with_ui": zhipu_ui,
-        "fn_without_ui": zhipu_noui,
-        "endpoint": None,
-        "max_token": 1000,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
     "glm-3-turbo": {
         "fn_with_ui": zhipu_ui,
         "fn_without_ui": zhipu_noui,
@@ -331,7 +282,7 @@ model_info = {
     "gemini-pro": {
         "fn_with_ui": genai_ui,
         "fn_without_ui": genai_noui,
-        "endpoint": gemini_endpoint,
+        "endpoint": None,
         "max_token": 1024 * 32,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
@@ -339,56 +290,13 @@ model_info = {
     "gemini-pro-vision": {
         "fn_with_ui": genai_ui,
         "fn_without_ui": genai_noui,
-        "endpoint": gemini_endpoint,
-        "max_token": 1024 * 32,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
-
-    # cohere
-    "cohere-command-r-plus": {
-        "fn_with_ui": cohere_ui,
-        "fn_without_ui": cohere_noui,
-        "can_multi_thread": True,
-        "endpoint": cohere_endpoint,
-        "max_token": 1024 * 4,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
-
-}
-# -=-=-=-=-=-=- 月之暗面 -=-=-=-=-=-=-
-from request_llms.bridge_moonshot import predict as moonshot_ui
-from request_llms.bridge_moonshot import predict_no_ui_long_connection as moonshot_no_ui
-model_info.update({
-    "moonshot-v1-8k": {
-        "fn_with_ui": moonshot_ui,
-        "fn_without_ui": moonshot_no_ui,
-        "can_multi_thread": True,
-        "endpoint": None,
-        "max_token": 1024 * 8,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
-    "moonshot-v1-32k": {
-        "fn_with_ui": moonshot_ui,
-        "fn_without_ui": moonshot_no_ui,
-        "can_multi_thread": True,
         "endpoint": None,
         "max_token": 1024 * 32,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
-    "moonshot-v1-128k": {
-        "fn_with_ui": moonshot_ui,
-        "fn_without_ui": moonshot_no_ui,
-        "can_multi_thread": True,
-        "endpoint": None,
-        "max_token": 1024 * 128,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
     }
-})
 # -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
 for model in AVAIL_LLM_MODELS:
     if model.startswith('api2d-') and (model.replace('api2d-','') in model_info.keys()):
@@ -404,67 +312,25 @@ for model in AVAIL_LLM_MODELS:
     model_info.update({model: mi})
 
 # -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=-
-# claude家族
-claude_models = ["claude-instant-1.2","claude-2.0","claude-2.1","claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229"]
-if any(item in claude_models for item in AVAIL_LLM_MODELS):
+if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS:
     from .bridge_claude import predict_no_ui_long_connection as claude_noui
     from .bridge_claude import predict as claude_ui
     model_info.update({
-        "claude-instant-1.2": {
+        "claude-1-100k": {
             "fn_with_ui": claude_ui,
             "fn_without_ui": claude_noui,
-            "endpoint": claude_endpoint,
-            "max_token": 100000,
+            "endpoint": None,
+            "max_token": 8196,
             "tokenizer": tokenizer_gpt35,
             "token_cnt": get_token_num_gpt35,
         },
     })
     model_info.update({
-        "claude-2.0": {
+        "claude-2": {
             "fn_with_ui": claude_ui,
             "fn_without_ui": claude_noui,
-            "endpoint": claude_endpoint,
-            "max_token": 100000,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-    })
-    model_info.update({
-        "claude-2.1": {
-            "fn_with_ui": claude_ui,
-            "fn_without_ui": claude_noui,
-            "endpoint": claude_endpoint,
-            "max_token": 200000,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-    })
-    model_info.update({
-        "claude-3-haiku-20240307": {
-            "fn_with_ui": claude_ui,
-            "fn_without_ui": claude_noui,
-            "endpoint": claude_endpoint,
-            "max_token": 200000,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-    })
-    model_info.update({
-        "claude-3-sonnet-20240229": {
-            "fn_with_ui": claude_ui,
-            "fn_without_ui": claude_noui,
-            "endpoint": claude_endpoint,
-            "max_token": 200000,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-    })
-    model_info.update({
-        "claude-3-opus-20240229": {
-            "fn_with_ui": claude_ui,
-            "fn_without_ui": claude_noui,
-            "endpoint": claude_endpoint,
-            "max_token": 200000,
+            "endpoint": None,
+            "max_token": 8196,
             "tokenizer": tokenizer_gpt35,
             "token_cnt": get_token_num_gpt35,
         },
@@ -534,6 +400,22 @@ if "stack-claude" in AVAIL_LLM_MODELS:
             "token_cnt": get_token_num_gpt35,
         }
     })
+if "newbing-free" in AVAIL_LLM_MODELS:
+    try:
+        from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
+        from .bridge_newbingfree import predict as newbingfree_ui
+        model_info.update({
+            "newbing-free": {
+                "fn_with_ui": newbingfree_ui,
+                "fn_without_ui": newbingfree_noui,
+                "endpoint": newbing_endpoint,
+                "max_token": 4096,
+                "tokenizer": tokenizer_gpt35,
+                "token_cnt": get_token_num_gpt35,
+            }
+        })
+    except:
+        print(trimmed_format_exc())
 if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
     try:
         from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
@@ -566,7 +448,6 @@ if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
         })
     except:
         print(trimmed_format_exc())
-# -=-=-=-=-=-=- 上海AI-LAB书生大模型 -=-=-=-=-=-=-
 if "internlm" in AVAIL_LLM_MODELS:
     try:
         from .bridge_internlm import predict_no_ui_long_connection as internlm_noui
@@ -599,7 +480,6 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS:
         })
     except:
         print(trimmed_format_exc())
-# -=-=-=-=-=-=- 通义-本地模型 -=-=-=-=-=-=-
 if "qwen-local" in AVAIL_LLM_MODELS:
     try:
         from .bridge_qwen_local import predict_no_ui_long_connection as qwen_local_noui
@@ -608,7 +488,6 @@ if "qwen-local" in AVAIL_LLM_MODELS:
             "qwen-local": {
                 "fn_with_ui": qwen_local_ui,
                 "fn_without_ui": qwen_local_noui,
-                "can_multi_thread": False,
                 "endpoint": None,
                 "max_token": 4096,
                 "tokenizer": tokenizer_gpt35,
@@ -617,7 +496,6 @@ if "qwen-local" in AVAIL_LLM_MODELS:
         })
     except:
         print(trimmed_format_exc())
-# -=-=-=-=-=-=- 通义-在线模型 -=-=-=-=-=-=-
 if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
     try:
         from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
@@ -626,7 +504,6 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
             "qwen-turbo": {
                 "fn_with_ui": qwen_ui,
                 "fn_without_ui": qwen_noui,
-                "can_multi_thread": True,
                 "endpoint": None,
                 "max_token": 6144,
                 "tokenizer": tokenizer_gpt35,
@@ -635,7 +512,6 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
             "qwen-plus": {
                 "fn_with_ui": qwen_ui,
                 "fn_without_ui": qwen_noui,
-                "can_multi_thread": True,
                 "endpoint": None,
                 "max_token": 30720,
                 "tokenizer": tokenizer_gpt35,
@@ -644,7 +520,6 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
             "qwen-max": {
                 "fn_with_ui": qwen_ui,
                 "fn_without_ui": qwen_noui,
-                "can_multi_thread": True,
                 "endpoint": None,
                 "max_token": 28672,
                 "tokenizer": tokenizer_gpt35,
@@ -653,35 +528,7 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
         })
     except:
         print(trimmed_format_exc())
-# -=-=-=-=-=-=- 零一万物模型 -=-=-=-=-=-=-
-if "yi-34b-chat-0205" in AVAIL_LLM_MODELS or "yi-34b-chat-200k" in AVAIL_LLM_MODELS: # zhipuai
-    try:
-        from .bridge_yimodel import predict_no_ui_long_connection as yimodel_noui
-        from .bridge_yimodel import predict as yimodel_ui
-        model_info.update({
-            "yi-34b-chat-0205": {
-                "fn_with_ui": yimodel_ui,
-                "fn_without_ui": yimodel_noui,
-                "can_multi_thread": False, # 目前来说,默认情况下并发量极低,因此禁用
-                "endpoint": yimodel_endpoint,
-                "max_token": 4000,
-                "tokenizer": tokenizer_gpt35,
-                "token_cnt": get_token_num_gpt35,
-            },
-            "yi-34b-chat-200k": {
-                "fn_with_ui": yimodel_ui,
-                "fn_without_ui": yimodel_noui,
-                "can_multi_thread": False, # 目前来说,默认情况下并发量极低,因此禁用
-                "endpoint": yimodel_endpoint,
-                "max_token": 200000,
-                "tokenizer": tokenizer_gpt35,
-                "token_cnt": get_token_num_gpt35,
-            },
-        })
-    except:
-        print(trimmed_format_exc())
-# -=-=-=-=-=-=- 讯飞星火认知大模型 -=-=-=-=-=-=-
-if "spark" in AVAIL_LLM_MODELS:
+if "spark" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
     try:
         from .bridge_spark import predict_no_ui_long_connection as spark_noui
         from .bridge_spark import predict as spark_ui
@@ -689,7 +536,6 @@ if "spark" in AVAIL_LLM_MODELS:
             "spark": {
                 "fn_with_ui": spark_ui,
                 "fn_without_ui": spark_noui,
-                "can_multi_thread": True,
                 "endpoint": None,
                 "max_token": 4096,
                 "tokenizer": tokenizer_gpt35,
@@ -706,7 +552,6 @@ if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
             "sparkv2": {
                 "fn_with_ui": spark_ui,
                 "fn_without_ui": spark_noui,
-                "can_multi_thread": True,
                 "endpoint": None,
                 "max_token": 4096,
                 "tokenizer": tokenizer_gpt35,
@@ -723,7 +568,6 @@ if "sparkv3" in AVAIL_LLM_MODELS or "sparkv3.5" in AVAIL_LLM_MODELS: # 讯飞
             "sparkv3": {
                 "fn_with_ui": spark_ui,
                 "fn_without_ui": spark_noui,
-                "can_multi_thread": True,
                 "endpoint": None,
                 "max_token": 4096,
                 "tokenizer": tokenizer_gpt35,
@@ -732,7 +576,6 @@ if "sparkv3" in AVAIL_LLM_MODELS or "sparkv3.5" in AVAIL_LLM_MODELS: # 讯飞
             "sparkv3.5": {
                 "fn_with_ui": spark_ui,
                 "fn_without_ui": spark_noui,
-                "can_multi_thread": True,
                 "endpoint": None,
                 "max_token": 4096,
                 "tokenizer": tokenizer_gpt35,
@@ -757,7 +600,6 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2
         })
     except:
         print(trimmed_format_exc())
-# -=-=-=-=-=-=- 智谱 -=-=-=-=-=-=-
 if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容配置
     try:
         model_info.update({
@@ -772,7 +614,6 @@ if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容
         })
     except:
         print(trimmed_format_exc())
-# -=-=-=-=-=-=- 幻方-深度求索大模型 -=-=-=-=-=-=-
 if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
     try:
         from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui
@@ -789,83 +630,26 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
         })
     except:
         print(trimmed_format_exc())
-# -=-=-=-=-=-=- one-api 对齐支持 -=-=-=-=-=-=-
-for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
-    # 为了更灵活地接入one-api多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"]
-    # 其中
-    #   "one-api-" 是前缀(必要)
-    #   "mixtral-8x7b" 是模型名(必要)
-    #   "(max_token=6666)" 是配置(非必要)
-    try:
-        _, max_token_tmp = read_one_api_model_name(model)
-    except:
-        print(f"one-api模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
-        continue
-    model_info.update({
-        model: {
-            "fn_with_ui": chatgpt_ui,
-            "fn_without_ui": chatgpt_noui,
-            "can_multi_thread": True,
-            "endpoint": openai_endpoint,
-            "max_token": max_token_tmp,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-    })
-# -=-=-=-=-=-=- vllm 对齐支持 -=-=-=-=-=-=-
-for model in [m for m in AVAIL_LLM_MODELS if m.startswith("vllm-")]:
-    # 为了更灵活地接入vllm多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=6666)"]
-    # 其中
-    #   "vllm-" 是前缀(必要)
-    #   "mixtral-8x7b" 是模型名(必要)
-    #   "(max_token=6666)" 是配置(非必要)
-    try:
-        _, max_token_tmp = read_one_api_model_name(model)
-    except:
-        print(f"vllm模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
-        continue
-    model_info.update({
-        model: {
-            "fn_with_ui": chatgpt_ui,
-            "fn_without_ui": chatgpt_noui,
-            "can_multi_thread": True,
-            "endpoint": openai_endpoint,
-            "max_token": max_token_tmp,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-    })
-# -=-=-=-=-=-=- ollama 对齐支持 -=-=-=-=-=-=-
-for model in [m for m in AVAIL_LLM_MODELS if m.startswith("ollama-")]:
-    from .bridge_ollama import predict_no_ui_long_connection as ollama_noui
-    from .bridge_ollama import predict as ollama_ui
-    break
-for model in [m for m in AVAIL_LLM_MODELS if m.startswith("ollama-")]:
-    # 为了更灵活地接入ollama多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["ollama-phi3(max_token=6666)"]
-    # 其中
-    #   "ollama-" 是前缀(必要)
-    #   "phi3" 是模型名(必要)
-    #   "(max_token=6666)" 是配置(非必要)
-    try:
-        _, max_token_tmp = read_one_api_model_name(model)
-    except:
-        print(f"ollama模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
-        continue
-    model_info.update({
-        model: {
-            "fn_with_ui": ollama_ui,
-            "fn_without_ui": ollama_noui,
-            "endpoint": ollama_endpoint,
-            "max_token": max_token_tmp,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        },
-    })
-
-# -=-=-=-=-=-=- azure模型对齐支持 -=-=-=-=-=-=-
-AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY") # <-- 用于定义和切换多个azure模型 -->
+# if "skylark" in AVAIL_LLM_MODELS:
+#     try:
+#         from .bridge_skylark2 import predict_no_ui_long_connection as skylark_noui
+#         from .bridge_skylark2 import predict as skylark_ui
+#         model_info.update({
+#             "skylark": {
+#                 "fn_with_ui": skylark_ui,
+#                 "fn_without_ui": skylark_noui,
+#                 "endpoint": None,
+#                 "max_token": 4096,
+#                 "tokenizer": tokenizer_gpt35,
+#                 "token_cnt": get_token_num_gpt35,
+#             }
+#         })
+#     except:
+#         print(trimmed_format_exc())
+
+
+# <-- 用于定义和切换多个azure模型 -->
+AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
 if len(AZURE_CFG_ARRAY) > 0:
     for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items():
         # 可能会覆盖之前的配置,但这是意料之中的
@@ -894,7 +678,7 @@ def LLM_CATCH_EXCEPTION(f):
     """
     装饰器函数,将错误显示出来
     """
-    def decorated(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list, console_slience:bool):
+    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
         try:
             return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
         except Exception as e:
@@ -904,9 +688,9 @@ def LLM_CATCH_EXCEPTION(f):
     return decorated


-def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list=[], console_slience:bool=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False):
     """
-    发送至LLM,等待回复,一次性完成,不显示中间过程。但内部(尽可能地)用stream的方法避免中途网线被掐。
+    发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
     inputs:
         是本次问询的输入
     sys_prompt:
@@ -924,6 +708,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
     model = llm_kwargs['llm_model']
     n_model = 1
     if '&' not in model:
+        assert not model.startswith("tgui"), "TGUI不支持函数插件的实现"

         # 如果只询问1个大语言模型:
         method = model_info[model]["fn_without_ui"]
@@ -958,8 +743,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
             # 观察窗(window)
             chat_string = []
             for i in range(n_model):
-                color = colors[i%len(colors)]
-                chat_string.append( f"【{str(models[i])} 说】: <font color=\"{color}\"> {window_mutex[i][0]} </font>" )
+                chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
             res = '<br/><br/>\n\n---\n\n'.join(chat_string)
             # # # # # # # # # # #
             observe_window[0] = res
@@ -976,30 +760,22 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
             time.sleep(1)

         for i, future in enumerate(futures):  # wait and get
-            color = colors[i%len(colors)]
-            return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{color}\"> {future.result()} </font>" )
+            return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )

         window_mutex[-1] = False # stop mutex thread
         res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
         return res


-def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
+def predict(inputs, llm_kwargs, *args, **kwargs):
     """
     发送至LLM,流式获取输出。
     用于基础的对话功能。
-    完整参数列表:
-        predict(
-            inputs:str,                  # 是本次问询的输入
-            llm_kwargs:dict,             # 是LLM的内部调优参数
-            plugin_kwargs:dict,          # 是插件的内部参数
-            chatbot:ChatBotWithCookies,  # 原样传递,负责向用户前端展示对话,兼顾前端状态的功能
-            history:list=[],             # 是之前的对话列表
-            system_prompt:str='',        # 系统静默prompt
-            stream:bool=True,            # 是否流式输出(已弃用)
-            additional_fn:str=None       # 基础功能区按钮的附加功能
-        ):
+    inputs 是本次问询的输入
+    top_p, temperature是LLM的内部调优参数
+    history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
+    chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
+    additional_fn代表点击的哪个按钮,按钮见functional.py
     """

     inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
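For context on the hunks above: `model_info` is a plain dictionary registry that maps a model name to its UI/non-UI callables, endpoint, token limit and tokenizer, and `predict_no_ui_long_connection` dispatches through it. The following is only a rough illustrative sketch of that registration-and-dispatch pattern; the placeholder functions `my_model_noui` / `my_model_ui` stand in for a real bridge module and are not part of the repository.

```python
# Rough sketch of the model_info registry pattern (placeholders, not the real bridges).
def my_model_noui(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False):
    return f"echo: {inputs}"   # placeholder blocking reply, used by plugins

def my_model_ui(inputs, llm_kwargs, *args, **kwargs):
    yield f"echo: {inputs}"    # placeholder streaming reply, used by the chat UI

model_info = {}
model_info.update({
    "my-model": {
        "fn_with_ui": my_model_ui,       # streaming function shown in the WebUI
        "fn_without_ui": my_model_noui,  # blocking function for plugin threads
        "endpoint": None,
        "max_token": 4096,
    },
})

def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window=[]):
    # Dispatch to the registered bridge for the selected model.
    method = model_info[llm_kwargs["llm_model"]]["fn_without_ui"]
    return method(inputs, llm_kwargs, history, sys_prompt, observe_window)

print(predict_no_ui_long_connection("hello", {"llm_model": "my-model"}, [], ""))
```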
@@ -6,6 +6,7 @@ from toolbox import get_conf, ProxyNetworkActivate
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns


+
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
@@ -22,45 +23,20 @@ class GetGLM3Handle(LocalLLMHandle):
         import os, glob
         import os
         import platform
+        LOCAL_MODEL_QUANT, device = get_conf('LOCAL_MODEL_QUANT', 'LOCAL_MODEL_DEVICE')

-        LOCAL_MODEL_QUANT, device = get_conf("LOCAL_MODEL_QUANT", "LOCAL_MODEL_DEVICE")
-        _model_name_ = "THUDM/chatglm3-6b"
-        # if LOCAL_MODEL_QUANT == "INT4": # INT4
-        #     _model_name_ = "THUDM/chatglm3-6b-int4"
-        # elif LOCAL_MODEL_QUANT == "INT8": # INT8
-        #     _model_name_ = "THUDM/chatglm3-6b-int8"
-        # else:
-        #     _model_name_ = "THUDM/chatglm3-6b" # FP16
-        with ProxyNetworkActivate("Download_LLM"):
-            chatglm_tokenizer = AutoTokenizer.from_pretrained(
-                _model_name_, trust_remote_code=True
-            )
-            if device == "cpu":
-                chatglm_model = AutoModel.from_pretrained(
-                    _model_name_,
-                    trust_remote_code=True,
-                    device="cpu",
-                ).float()
-            elif LOCAL_MODEL_QUANT == "INT4": # INT4
-                chatglm_model = AutoModel.from_pretrained(
-                    pretrained_model_name_or_path=_model_name_,
-                    trust_remote_code=True,
-                    device="cuda",
-                    load_in_4bit=True,
-                )
-            elif LOCAL_MODEL_QUANT == "INT8": # INT8
-                chatglm_model = AutoModel.from_pretrained(
-                    pretrained_model_name_or_path=_model_name_,
-                    trust_remote_code=True,
-                    device="cuda",
-                    load_in_8bit=True,
-                )
-            else:
-                chatglm_model = AutoModel.from_pretrained(
-                    pretrained_model_name_or_path=_model_name_,
-                    trust_remote_code=True,
-                    device="cuda",
-                )
+        if LOCAL_MODEL_QUANT == "INT4": # INT4
+            _model_name_ = "THUDM/chatglm3-6b-int4"
+        elif LOCAL_MODEL_QUANT == "INT8": # INT8
+            _model_name_ = "THUDM/chatglm3-6b-int8"
+        else:
+            _model_name_ = "THUDM/chatglm3-6b" # FP16
+        with ProxyNetworkActivate('Download_LLM'):
+            chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True)
+            if device=='cpu':
+                chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True, device='cpu').float()
+            else:
+                chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True, device='cuda')
         chatglm_model = chatglm_model.eval()

         self._model = chatglm_model
@@ -70,17 +46,16 @@ class GetGLM3Handle(LocalLLMHandle):
     def llm_stream_generator(self, **kwargs):
         # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
         def adaptor(kwargs):
-            query = kwargs["query"]
-            max_length = kwargs["max_length"]
-            top_p = kwargs["top_p"]
-            temperature = kwargs["temperature"]
-            history = kwargs["history"]
+            query = kwargs['query']
+            max_length = kwargs['max_length']
+            top_p = kwargs['top_p']
+            temperature = kwargs['temperature']
+            history = kwargs['history']
             return query, max_length, top_p, temperature, history

         query, max_length, top_p, temperature, history = adaptor(kwargs)

-        for response, history in self._model.stream_chat(
-            self._tokenizer,
+        for response, history in self._model.stream_chat(self._tokenizer,
             query,
             history,
             max_length=max_length,
@@ -93,13 +68,10 @@ class GetGLM3Handle(LocalLLMHandle):
     # import something that will raise error if the user does not install requirement_*.txt
     # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行
     import importlib

     # importlib.import_module('modelscope')


 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 GPT-Academic Interface
 # ------------------------------------------------------------------------------------------------------------------------
-predict_no_ui_long_connection, predict = get_local_llm_predict_fns(
-    GetGLM3Handle, model_name, history_format="chatglm3"
-)
+predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetGLM3Handle, model_name, history_format='chatglm3')
@@ -137,8 +137,7 @@ class GetGLMFTHandle(Process):
     global glmft_handle
     glmft_handle = None
 #################################################################################
-def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
-                                  observe_window:list=[], console_slience:bool=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
     """
     多线程方法
     函数的说明请见 request_llms/bridge_all.py
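The signature simplified above is the one plugins call from worker threads; `observe_window` is a small mutable list the bridge writes partial output (and, in the project's convention, a watchdog timestamp) into so the caller can surface progress while waiting. A hedged usage sketch follows; `fake_bridge` is an invented stand-in, not a real bridge, and the exact fields a real bridge writes into `observe_window` may differ.

```python
# Hedged usage sketch of the observe_window convention (fake_bridge is invented for illustration).
import threading, time

def fake_bridge(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False):
    reply = ""
    for word in ["hello", " ", "world"]:
        time.sleep(0.1)
        reply += word
        if len(observe_window) >= 1: observe_window[0] = reply        # partial output for the caller
        if len(observe_window) >= 2: observe_window[1] = time.time()  # heartbeat for a watchdog
    return reply

observe_window = ["", time.time()]
worker = threading.Thread(target=fake_bridge, args=("hi", {}, [], ""),
                          kwargs={"observe_window": observe_window})
worker.start()
while worker.is_alive():
    time.sleep(0.05)
    print("partial:", observe_window[0])   # caller polls the shared list for progress
worker.join()
```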
@@ -21,9 +21,7 @@ import random

 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
-from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history
-from toolbox import trimmed_format_exc, is_the_upload_folder, read_one_api_model_name, log_chat
-from toolbox import ChatBotWithCookies
+from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder
 proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
     get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')

@@ -70,7 +68,7 @@ def verify_endpoint(endpoint):
         raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
     return endpoint

-def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     """
     发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
     inputs:
@@ -127,9 +125,8 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
                 json_data = chunkjson['choices'][0]
                 delta = json_data["delta"]
                 if len(delta) == 0: break
-                if (not has_content) and has_role: continue
-                if (not has_content) and (not has_role): continue # raise RuntimeError("发现不标准的第三方接口:"+delta)
-                if has_content: # has_role = True/False
+                if "role" in delta: continue
+                if "content" in delta:
                     result += delta["content"]
                     if not console_slience: print(delta["content"], end='')
                     if observe_window is not None:
@@ -148,8 +145,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
     return result


-def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
-            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
     """
     发送至chatGPT,流式获取输出。
     用于基础的对话功能。
@@ -175,7 +171,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
         inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

     raw_input = inputs
-    # logging.info(f'[raw_input] {raw_input}')
+    logging.info(f'[raw_input] {raw_input}')
     chatbot.append((inputs, ""))
     yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面

@@ -256,8 +252,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 # 前者是API2D的结束条件,后者是OPENAI的结束条件
                 if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
                     # 判定为数据流的结束,gpt_replying_buffer也写完了
-                    # logging.info(f'[response] {gpt_replying_buffer}')
-                    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                    logging.info(f'[response] {gpt_replying_buffer}')
                     break
                 # 处理数据流的主体
                 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -269,8 +264,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                         # 一些第三方接口的出现这样的错误,兼容一下吧
                         continue
                     else:
-                        # 至此已经超出了正常接口应该进入的范围,一些垃圾第三方接口会出现这样的错误
-                        if chunkjson['choices'][0]["delta"]["content"] is None: continue # 一些垃圾第三方接口出现这样的错误,兼容一下吧
+                        # 一些垃圾第三方接口的出现这样的错误
                         gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]

                     history[-1] = gpt_replying_buffer
@@ -323,9 +317,6 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     if not is_any_api_key(llm_kwargs['api_key']):
         raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")

-    if llm_kwargs['llm_model'].startswith('vllm-'):
-        api_key = 'no-api-key'
-    else:
     api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])

     headers = {
@@ -365,12 +356,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     model = llm_kwargs['llm_model']
     if llm_kwargs['llm_model'].startswith('api2d-'):
         model = llm_kwargs['llm_model'][len('api2d-'):]
-    if llm_kwargs['llm_model'].startswith('one-api-'):
-        model = llm_kwargs['llm_model'][len('one-api-'):]
-        model, _ = read_one_api_model_name(model)
-    if llm_kwargs['llm_model'].startswith('vllm-'):
-        model = llm_kwargs['llm_model'][len('vllm-'):]
-        model, _ = read_one_api_model_name(model)
     if model == "gpt-3.5-random": # 随机选择, 绕过openai访问频率限制
         model = random.choice([
             "gpt-3.5-turbo",
@@ -9,15 +9,15 @@
 具备多线程调用能力的函数
 2. predict_no_ui_long_connection:支持多线程
 """
-import logging
 import os
-import time
-import traceback
 import json
+import time
+import gradio as gr
+import logging
+import traceback
 import requests
-from toolbox import get_conf, update_ui, trimmed_format_exc, encode_image, every_image_file_in_path, log_chat
+import importlib
-picture_system_prompt = "\n当回复图像时,必须说明正在回复哪张图像。所有图像仅在最后一个问题中提供,即使它们在历史记录中被提及。请使用'这是第X张图像:'的格式来指明您正在描述的是哪张图像。"
-Claude_3_Models = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"]

 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
@@ -39,34 +39,6 @@ def get_full_error(chunk, stream_response):
 break
 return chunk

-def decode_chunk(chunk):
-# 提前读取一些信息(用于判断异常)
-chunk_decoded = chunk.decode()
-chunkjson = None
-is_last_chunk = False
-need_to_pass = False
-if chunk_decoded.startswith('data:'):
-try:
-chunkjson = json.loads(chunk_decoded[6:])
-except:
-need_to_pass = True
-pass
-elif chunk_decoded.startswith('event:'):
-try:
-event_type = chunk_decoded.split(':')[1].strip()
-if event_type == 'content_block_stop' or event_type == 'message_stop':
-is_last_chunk = True
-elif event_type == 'content_block_start' or event_type == 'message_start':
-need_to_pass = True
-pass
-except:
-need_to_pass = True
-pass
-else:
-need_to_pass = True
-pass
-return need_to_pass, chunkjson, is_last_chunk


 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
 """
@@ -82,67 +54,50 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
 observe_window = None:
 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
 """
+from anthropic import Anthropic
 watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
+prompt = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
+retry = 0
 if len(ANTHROPIC_API_KEY) == 0:
 raise RuntimeError("没有设置ANTHROPIC_API_KEY选项")
-if inputs == "": inputs = "空空如也的输入栏"
-headers, message = generate_payload(inputs, llm_kwargs, history, sys_prompt, image_paths=None)
-retry = 0


 while True:
 try:
 # make a POST request to the API endpoint, stream=False
 from .bridge_all import model_info
-endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
+anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
-response = requests.post(endpoint, headers=headers, json=message,
+# endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
-proxies=proxies, stream=True, timeout=TIMEOUT_SECONDS);break
+# with ProxyNetworkActivate()
-except requests.exceptions.ReadTimeout as e:
+stream = anthropic.completions.create(
+prompt=prompt,
+max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping.
+model=llm_kwargs['llm_model'],
+stream=True,
+temperature = llm_kwargs['temperature']
+)
+break
+except Exception as e:
 retry += 1
 traceback.print_exc()
 if retry > MAX_RETRY: raise TimeoutError
 if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
-stream_response = response.iter_lines()
 result = ''
-while True:
-try: chunk = next(stream_response)
-except StopIteration:
-break
-except requests.exceptions.ConnectionError:
-chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
-need_to_pass, chunkjson, is_last_chunk = decode_chunk(chunk)
-if chunk:
 try:
-if need_to_pass:
+for completion in stream:
-pass
+result += completion.completion
-elif is_last_chunk:
+if not console_slience: print(completion.completion, end='')
-# logging.info(f'[response] {result}')
-break
-else:
-if chunkjson and chunkjson['type'] == 'content_block_delta':
-result += chunkjson['delta']['text']
-print(chunkjson['delta']['text'], end='')
 if observe_window is not None:
 # 观测窗,把已经获取的数据显示出去
-if len(observe_window) >= 1:
+if len(observe_window) >= 1: observe_window[0] += completion.completion
-observe_window[0] += chunkjson['delta']['text']
 # 看门狗,如果超过期限没有喂狗,则终止
 if len(observe_window) >= 2:
 if (time.time()-observe_window[1]) > watch_dog_patience:
 raise RuntimeError("用户取消了程序。")
 except Exception as e:
-chunk = get_full_error(chunk, stream_response)
+traceback.print_exc()
-chunk_decoded = chunk.decode()
-error_msg = chunk_decoded
-print(error_msg)
-raise RuntimeError("Json解析不合常规")

 return result

-def make_media_input(history,inputs,image_paths):
-for image_path in image_paths:
-inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
-return inputs

 def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
 """
@@ -154,7 +109,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
 additional_fn代表点击的哪个按钮,按钮见functional.py
 """
-if inputs == "": inputs = "空空如也的输入栏"
+from anthropic import Anthropic
 if len(ANTHROPIC_API_KEY) == 0:
 chatbot.append((inputs, "没有设置ANTHROPIC_API_KEY"))
 yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
@@ -164,23 +119,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 from core_functional import handle_core_functionality
 inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

-have_recent_file, image_paths = every_image_file_in_path(chatbot)
+raw_input = inputs
-if len(image_paths) > 20:
+logging.info(f'[raw_input] {raw_input}')
-chatbot.append((inputs, "图片数量超过api上限(20张)"))
-yield from update_ui(chatbot=chatbot, history=history, msg="等待响应")
-return

-if any([llm_kwargs['llm_model'] == model for model in Claude_3_Models]) and have_recent_file:
-if inputs == "" or inputs == "空空如也的输入栏": inputs = "请描述给出的图片"
-system_prompt += picture_system_prompt # 由于没有单独的参数保存包含图片的历史,所以只能通过提示词对第几张图片进行定位
-chatbot.append((make_media_input(history,inputs, image_paths), ""))
-yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
-else:
 chatbot.append((inputs, ""))
 yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面

 try:
-headers, message = generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths)
+prompt = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
 except RuntimeError as e:
 chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
 yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
@@ -193,117 +138,91 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 try:
 # make a POST request to the API endpoint, stream=True
 from .bridge_all import model_info
-endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
+anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
-response = requests.post(endpoint, headers=headers, json=message,
+# endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
-proxies=proxies, stream=True, timeout=TIMEOUT_SECONDS);break
+# with ProxyNetworkActivate()
-except requests.exceptions.ReadTimeout as e:
+stream = anthropic.completions.create(
+prompt=prompt,
+max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping.
+model=llm_kwargs['llm_model'],
+stream=True,
+temperature = llm_kwargs['temperature']
+)

+break
+except:
 retry += 1
-traceback.print_exc()
+chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
+retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
+yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
 if retry > MAX_RETRY: raise TimeoutError
-if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
-stream_response = response.iter_lines()
 gpt_replying_buffer = ""

-while True:
+for completion in stream:
-try: chunk = next(stream_response)
-except StopIteration:
-break
-except requests.exceptions.ConnectionError:
-chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
-need_to_pass, chunkjson, is_last_chunk = decode_chunk(chunk)
-if chunk:
 try:
-if need_to_pass:
+gpt_replying_buffer = gpt_replying_buffer + completion.completion
-pass
-elif is_last_chunk:
-log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
-# logging.info(f'[response] {gpt_replying_buffer}')
-break
-else:
-if chunkjson and chunkjson['type'] == 'content_block_delta':
-gpt_replying_buffer += chunkjson['delta']['text']
 history[-1] = gpt_replying_buffer
 chatbot[-1] = (history[-2], history[-1])
 yield from update_ui(chatbot=chatbot, history=history, msg='正常') # 刷新界面

 except Exception as e:
-chunk = get_full_error(chunk, stream_response)
+from toolbox import regular_txt_to_markdown
-chunk_decoded = chunk.decode()
+tb_str = '```\n' + trimmed_format_exc() + '```'
-error_msg = chunk_decoded
+chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str}")
-print(error_msg)
+yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + tb_str) # 刷新界面
-raise RuntimeError("Json解析不合常规")
+return

-def multiple_picture_types(image_paths):
-"""
-根据图片类型返回image/jpeg, image/png, image/gif, image/webp,无法判断则返回image/jpeg
-"""
-for image_path in image_paths:
-if image_path.endswith('.jpeg') or image_path.endswith('.jpg'):
-return 'image/jpeg'
-elif image_path.endswith('.png'):
-return 'image/png'
-elif image_path.endswith('.gif'):
-return 'image/gif'
-elif image_path.endswith('.webp'):
-return 'image/webp'
-return 'image/jpeg'

-def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths):

+# https://github.com/jtsang4/claude-to-chatgpt/blob/main/claude_to_chatgpt/adapter.py
+def convert_messages_to_prompt(messages):
+prompt = ""
+role_map = {
+"system": "Human",
+"user": "Human",
+"assistant": "Assistant",
+}
+for message in messages:
+role = message["role"]
+content = message["content"]
+transformed_role = role_map[role]
+prompt += f"\n\n{transformed_role.capitalize()}: {content}"
+prompt += "\n\nAssistant: "
+return prompt

+def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
 """
 整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
 """
+from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT

 conversation_cnt = len(history) // 2

-messages = []
+messages = [{"role": "system", "content": system_prompt}]

 if conversation_cnt:
 for index in range(0, 2*conversation_cnt, 2):
 what_i_have_asked = {}
 what_i_have_asked["role"] = "user"
-what_i_have_asked["content"] = [{"type": "text", "text": history[index]}]
+what_i_have_asked["content"] = history[index]
 what_gpt_answer = {}
 what_gpt_answer["role"] = "assistant"
-what_gpt_answer["content"] = [{"type": "text", "text": history[index+1]}]
+what_gpt_answer["content"] = history[index+1]
-if what_i_have_asked["content"][0]["text"] != "":
+if what_i_have_asked["content"] != "":
-if what_i_have_asked["content"][0]["text"] == "": continue
+if what_gpt_answer["content"] == "": continue
-if what_i_have_asked["content"][0]["text"] == timeout_bot_msg: continue
+if what_gpt_answer["content"] == timeout_bot_msg: continue
 messages.append(what_i_have_asked)
 messages.append(what_gpt_answer)
 else:
-messages[-1]['content'][0]['text'] = what_gpt_answer['content'][0]['text']
+messages[-1]['content'] = what_gpt_answer['content']

-if any([llm_kwargs['llm_model'] == model for model in Claude_3_Models]) and image_paths:
 what_i_ask_now = {}
 what_i_ask_now["role"] = "user"
-what_i_ask_now["content"] = []
+what_i_ask_now["content"] = inputs
-for image_path in image_paths:
-what_i_ask_now["content"].append({
-"type": "image",
-"source": {
-"type": "base64",
-"media_type": multiple_picture_types(image_paths),
-"data": encode_image(image_path),
-}
-})
-what_i_ask_now["content"].append({"type": "text", "text": inputs})
-else:
-what_i_ask_now = {}
-what_i_ask_now["role"] = "user"
-what_i_ask_now["content"] = [{"type": "text", "text": inputs}]
 messages.append(what_i_ask_now)
-# 开始整理headers与message
+prompt = convert_messages_to_prompt(messages)
-headers = {
-'x-api-key': ANTHROPIC_API_KEY,
+return prompt
-'anthropic-version': '2023-06-01',
-'content-type': 'application/json'
-}
-payload = {
-'model': llm_kwargs['llm_model'],
-'max_tokens': 4096,
-'messages': messages,
-'temperature': llm_kwargs['temperature'],
-'stream': True,
-'system': system_prompt
-}
-return headers, payload
@@ -1,328 +0,0 @@
-# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目
-
-"""
-该文件中主要包含三个函数
-
-不具备多线程能力的函数:
-1. predict: 正常对话时使用,具备完备的交互功能,不可多线程
-
-具备多线程调用能力的函数
-2. predict_no_ui_long_connection:支持多线程
-"""
-
-import json
-import time
-import gradio as gr
-import logging
-import traceback
-import requests
-import importlib
-import random
-
-# config_private.py放自己的秘密如API和代理网址
-# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
-from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history
-from toolbox import trimmed_format_exc, is_the_upload_folder, read_one_api_model_name, log_chat
-from toolbox import ChatBotWithCookies
-proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
-get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')
-
-timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
-'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
-
-def get_full_error(chunk, stream_response):
-"""
-获取完整的从Cohere返回的报错
-"""
-while True:
-try:
-chunk += next(stream_response)
-except:
-break
-return chunk
-
-def decode_chunk(chunk):
-# 提前读取一些信息 (用于判断异常)
-chunk_decoded = chunk.decode()
-chunkjson = None
-has_choices = False
-choice_valid = False
-has_content = False
-has_role = False
-try:
-chunkjson = json.loads(chunk_decoded)
-has_choices = 'choices' in chunkjson
-if has_choices: choice_valid = (len(chunkjson['choices']) > 0)
-if has_choices and choice_valid: has_content = ("content" in chunkjson['choices'][0]["delta"])
-if has_content: has_content = (chunkjson['choices'][0]["delta"]["content"] is not None)
-if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"]
-except:
-pass
-return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role
-
-from functools import lru_cache
-@lru_cache(maxsize=32)
-def verify_endpoint(endpoint):
-"""
-检查endpoint是否可用
-"""
-if "你亲手写的api名称" in endpoint:
-raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
-return endpoint
-
-def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
-"""
-发送,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
-inputs:
-是本次问询的输入
-sys_prompt:
-系统静默prompt
-llm_kwargs:
-内部调优参数
-history:
-是之前的对话列表
-observe_window = None:
-用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
-"""
-watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
-headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
-retry = 0
-while True:
-try:
-# make a POST request to the API endpoint, stream=False
-from .bridge_all import model_info
-endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint'])
-response = requests.post(endpoint, headers=headers, proxies=proxies,
-json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
-except requests.exceptions.ReadTimeout as e:
-retry += 1
-traceback.print_exc()
-if retry > MAX_RETRY: raise TimeoutError
-if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
-
-stream_response = response.iter_lines()
-result = ''
-json_data = None
-while True:
-try: chunk = next(stream_response)
-except StopIteration:
-break
-except requests.exceptions.ConnectionError:
-chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
-chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
-if chunkjson['event_type'] == 'stream-start': continue
-if chunkjson['event_type'] == 'text-generation':
-result += chunkjson["text"]
-if not console_slience: print(chunkjson["text"], end='')
-if observe_window is not None:
-# 观测窗,把已经获取的数据显示出去
-if len(observe_window) >= 1:
-observe_window[0] += chunkjson["text"]
-# 看门狗,如果超过期限没有喂狗,则终止
-if len(observe_window) >= 2:
-if (time.time()-observe_window[1]) > watch_dog_patience:
-raise RuntimeError("用户取消了程序。")
-if chunkjson['event_type'] == 'stream-end': break
-return result
-
-
-def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
-history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
-"""
-发送至chatGPT,流式获取输出。
-用于基础的对话功能。
-inputs 是本次问询的输入
-top_p, temperature是chatGPT的内部调优参数
-history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
-chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
-additional_fn代表点击的哪个按钮,按钮见functional.py
-"""
-# if is_any_api_key(inputs):
-# chatbot._cookies['api_key'] = inputs
-# chatbot.append(("输入已识别为Cohere的api_key", what_keys(inputs)))
-# yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
-# return
-# elif not is_any_api_key(chatbot._cookies['api_key']):
-# chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
-# yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面
-# return
-
-user_input = inputs
-if additional_fn is not None:
-from core_functional import handle_core_functionality
-inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
-
-raw_input = inputs
-# logging.info(f'[raw_input] {raw_input}')
-chatbot.append((inputs, ""))
-yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
-
-# check mis-behavior
-if is_the_upload_folder(user_input):
-chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。")
-yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
-time.sleep(2)
-
-try:
-headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
-except RuntimeError as e:
-chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
-yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
-return
-
-# 检查endpoint是否合法
-try:
-from .bridge_all import model_info
-endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint'])
-except:
-tb_str = '```\n' + trimmed_format_exc() + '```'
-chatbot[-1] = (inputs, tb_str)
-yield from update_ui(chatbot=chatbot, history=history, msg="Endpoint不满足要求") # 刷新界面
-return
-
-history.append(inputs); history.append("")
-
-retry = 0
-while True:
-try:
-# make a POST request to the API endpoint, stream=True
-response = requests.post(endpoint, headers=headers, proxies=proxies,
-json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
-except:
-retry += 1
-chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
-retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
-yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
-if retry > MAX_RETRY: raise TimeoutError
-
-gpt_replying_buffer = ""
-
-is_head_of_the_stream = True
-if stream:
-stream_response = response.iter_lines()
-while True:
-try:
-chunk = next(stream_response)
-except StopIteration:
-# 非Cohere官方接口的出现这样的报错,Cohere和API2D不会走这里
-chunk_decoded = chunk.decode()
-error_msg = chunk_decoded
-# 其他情况,直接返回报错
-chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
-yield from update_ui(chatbot=chatbot, history=history, msg="非Cohere官方接口返回了错误:" + chunk.decode()) # 刷新界面
-return
-
-# 提前读取一些信息 (用于判断异常)
-chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
-
-if chunkjson:
-try:
-if chunkjson['event_type'] == 'stream-start':
-continue
-if chunkjson['event_type'] == 'text-generation':
-gpt_replying_buffer = gpt_replying_buffer + chunkjson["text"]
-history[-1] = gpt_replying_buffer
-chatbot[-1] = (history[-2], history[-1])
-yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
-if chunkjson['event_type'] == 'stream-end':
-log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
-history[-1] = gpt_replying_buffer
-chatbot[-1] = (history[-2], history[-1])
-yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
-break
-except Exception as e:
-yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
-chunk = get_full_error(chunk, stream_response)
-chunk_decoded = chunk.decode()
-error_msg = chunk_decoded
-chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
-yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
-print(error_msg)
-return
-
-def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg):
-from .bridge_all import model_info
-Cohere_website = ' 请登录Cohere查看详情 https://platform.Cohere.com/signup'
-if "reduce the length" in error_msg:
-if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出
-history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
-max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一
-chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
-elif "does not exist" in error_msg:
-chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
-elif "Incorrect API key" in error_msg:
-chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. Cohere以提供了不正确的API_KEY为由, 拒绝服务. " + Cohere_website)
-elif "exceeded your current quota" in error_msg:
-chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. Cohere以账户额度不足为由, 拒绝服务." + Cohere_website)
-elif "account is not active" in error_msg:
-chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. Cohere以账户失效为由, 拒绝服务." + Cohere_website)
-elif "associated with a deactivated account" in error_msg:
-chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. Cohere以账户失效为由, 拒绝服务." + Cohere_website)
-elif "API key has been deactivated" in error_msg:
-chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. Cohere以账户失效为由, 拒绝服务." + Cohere_website)
-elif "bad forward key" in error_msg:
-chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
-elif "Not enough point" in error_msg:
-chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.")
-else:
-from toolbox import regular_txt_to_markdown
-tb_str = '```\n' + trimmed_format_exc() + '```'
-chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
-return chatbot, history
-
-def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
-"""
-整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
-"""
-# if not is_any_api_key(llm_kwargs['api_key']):
-# raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")
-
-api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
-
-headers = {
-"Content-Type": "application/json",
-"Authorization": f"Bearer {api_key}"
-}
-if API_ORG.startswith('org-'): headers.update({"Cohere-Organization": API_ORG})
-if llm_kwargs['llm_model'].startswith('azure-'):
-headers.update({"api-key": api_key})
-if llm_kwargs['llm_model'] in AZURE_CFG_ARRAY.keys():
-azure_api_key_unshared = AZURE_CFG_ARRAY[llm_kwargs['llm_model']]["AZURE_API_KEY"]
-headers.update({"api-key": azure_api_key_unshared})
-
-conversation_cnt = len(history) // 2
-
-messages = [{"role": "SYSTEM", "message": system_prompt}]
-if conversation_cnt:
-for index in range(0, 2*conversation_cnt, 2):
-what_i_have_asked = {}
-what_i_have_asked["role"] = "USER"
-what_i_have_asked["message"] = history[index]
-what_gpt_answer = {}
-what_gpt_answer["role"] = "CHATBOT"
-what_gpt_answer["message"] = history[index+1]
-if what_i_have_asked["message"] != "":
-if what_gpt_answer["message"] == "": continue
-if what_gpt_answer["message"] == timeout_bot_msg: continue
-messages.append(what_i_have_asked)
-messages.append(what_gpt_answer)
-else:
-messages[-1]['message'] = what_gpt_answer['message']
-
-model = llm_kwargs['llm_model']
-if model.startswith('cohere-'): model = model[len('cohere-'):]
-payload = {
-"model": model,
-"message": inputs,
-"chat_history": messages,
-"temperature": llm_kwargs['temperature'], # 1.0,
-"top_p": llm_kwargs['top_p'], # 1.0,
-"n": 1,
-"stream": stream,
-"presence_penalty": 0,
-"frequency_penalty": 0,
-}
-
-return headers,payload
-
-
@@ -7,8 +7,7 @@ import re
 import os
 import time
 from request_llms.com_google import GoogleChatInit
-from toolbox import ChatBotWithCookies
+from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc
-from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc, log_chat

 proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
 timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
@@ -21,7 +20,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
 if get_conf("GEMINI_API_KEY") == "":
 raise ValueError(f"请配置 GEMINI_API_KEY。")

-genai = GoogleChatInit(llm_kwargs)
+genai = GoogleChatInit()
 watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
 gpt_replying_buffer = ''
 stream_response = genai.generate_chat(inputs, llm_kwargs, history, sys_prompt)
@@ -45,8 +44,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
 return gpt_replying_buffer


-def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
-history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
 # 检查API_KEY
 if get_conf("GEMINI_API_KEY") == "":
 yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
@@ -72,7 +70,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith

 chatbot.append((inputs, ""))
 yield from update_ui(chatbot=chatbot, history=history)
-genai = GoogleChatInit(llm_kwargs)
+genai = GoogleChatInit()
 retry = 0
 while True:
 try:
@@ -99,7 +97,6 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
 gpt_replying_buffer += paraphrase['text'] # 使用 json 解析库进行处理
 chatbot[-1] = (inputs, gpt_replying_buffer)
 history[-1] = gpt_replying_buffer
-log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
 yield from update_ui(chatbot=chatbot, history=history)
 if error_match:
 history = history[-2] # 错误的不纳入对话
@@ -1,10 +1,10 @@

+from transformers import AutoModel, AutoTokenizer
 import time
 import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from transformers import AutoModel, AutoTokenizer

 load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"

@@ -106,8 +106,7 @@ class GetGLMHandle(Process):
 global llama_glm_handle
 llama_glm_handle = None
 #################################################################################
-def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
-observe_window:list=[], console_slience:bool=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
 """
 多线程方法
 函数的说明请见 request_llms/bridge_all.py
Some files were not shown because too many files have changed in this diff.