From 4824905592d608e133eb9e4962c803ace0a369b0 Mon Sep 17 00:00:00 2001 From: Yao Xiao Date: Tue, 7 Nov 2023 09:48:01 +0800 Subject: [PATCH 01/31] Add new API support --- config.py | 2 +- request_llm/bridge_all.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/config.py b/config.py index b4f00a63..903ee20f 100644 --- a/config.py +++ b/config.py @@ -87,7 +87,7 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", +AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py index f85d1b6b..3d6e4bd7 100644 --- a/request_llm/bridge_all.py +++ b/request_llm/bridge_all.py @@ -117,6 +117,15 @@ model_info = { "token_cnt": get_token_num_gpt35, }, + "gpt-3.5-turbo-1106": {#16k + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 16385, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + "gpt-4": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, @@ -135,6 +144,15 @@ model_info = { "token_cnt": get_token_num_gpt4, }, + "gpt-4-1106-preview": { + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 128000, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, + }, + "gpt-3.5-random": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, From 136e6aaa21102cc41ca87afa62be8ee510e07c41 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:08:24 +0800 Subject: [PATCH 02/31] Update config.py --- config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config.py b/config.py index b4f00a63..b74f1e66 100644 --- a/config.py +++ b/config.py @@ -87,9 +87,9 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', - "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", + "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] # P.S. 
其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] From 0897057be1ab8ebdf3ffad9b3d4c50add68b6aa5 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:11:52 +0800 Subject: [PATCH 03/31] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0378eaa1..eeb354a7 100644 --- a/README.md +++ b/README.md @@ -139,7 +139,7 @@ git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss # 参考wiki:https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner # 【可选步骤IV】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

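For context on how the model names added above take effect: every request is dispatched through the `model_info` table in `request_llm/bridge_all.py`, which patches 01 and 05 extend. A minimal sketch of that registration-and-lookup pattern — the entry shape and `max_token` values follow the patches, while the `route()` helper is illustrative only, not the project's actual API:

```python
# Simplified sketch of the model_info dispatch extended in patches 01/05.
openai_endpoint = "https://api.openai.com/v1/chat/completions"

model_info = {
    "gpt-3.5-turbo-1106": {"endpoint": openai_endpoint, "max_token": 16385},
    "gpt-4-1106-preview": {"endpoint": openai_endpoint, "max_token": 128000},
}

def route(llm_model: str) -> dict:
    # A model listed in AVAIL_LLM_MODELS but never registered here should
    # fail fast instead of falling through to the wrong endpoint.
    if llm_model not in model_info:
        raise KeyError(f"{llm_model} is not registered in model_info")
    return model_info[llm_model]
```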
From 4d9256296d2ae17e49d523ce4dc557104c35ce1c Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:13:37 +0800 Subject: [PATCH 04/31] =?UTF-8?q?Update=20=E5=A4=9A=E6=99=BA=E8=83=BD?= =?UTF-8?q?=E4=BD=93.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/多智能体.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crazy_functions/多智能体.py b/crazy_functions/多智能体.py index 5a4c4a58..a2d0ce74 100644 --- a/crazy_functions/多智能体.py +++ b/crazy_functions/多智能体.py @@ -32,7 +32,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ web_port 当前软件运行的端口号 """ # 检查当前的模型是否符合要求 - supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k', + supported_llms = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo-1106', 'gpt-4', 'gpt-4-32k', 'gpt-4-1106-preview', 'api2d-gpt-3.5-turbo-16k', 'api2d-gpt-4'] llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) if llm_kwargs['llm_model'] not in supported_llms: From 3ed0e8012d86be9f37d11be8db5dc2a1dfb1db51 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:17:01 +0800 Subject: [PATCH 05/31] Update bridge_all.py --- request_llm/bridge_all.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py index f85d1b6b..2c535378 100644 --- a/request_llm/bridge_all.py +++ b/request_llm/bridge_all.py @@ -108,6 +108,15 @@ model_info = { "token_cnt": get_token_num_gpt35, }, + "gpt-3.5-turbo-1106": { + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 1024 *16, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + "gpt-3.5-turbo-16k-0613": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, @@ -135,6 +144,15 @@ model_info = { "token_cnt": get_token_num_gpt4, }, + "gpt-4-1106-preview": { + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 1024 * 128, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, + }, + "gpt-3.5-random": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, From 61cf2b32eb979607b41f1240f478f70d9d447b5a Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:21:08 +0800 Subject: [PATCH 06/31] Update README.md.German.md --- docs/README.md.German.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README.md.German.md b/docs/README.md.German.md index d514de30..fa15a8a7 100644 --- a/docs/README.md.German.md +++ b/docs/README.md.German.md @@ -111,7 +111,7 @@ python -m pip install -r request_llm/requirements_moss.txt git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the project root path # [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. 
Currently supported models are as follows (jittorllms series currently only supports docker solutions): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

@@ -304,4 +304,4 @@ https://github.com/kaixindelele/ChatPaper # Mehr: https://github.com/gradio-app/gradio https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file +``` From bba3419ace4031bb8a29695a53ffe0ea54abac98 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:21:32 +0800 Subject: [PATCH 07/31] Update README.md.Italian.md --- docs/README.md.Italian.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md.Italian.md b/docs/README.md.Italian.md index 76efe185..fc01a27e 100644 --- a/docs/README.md.Italian.md +++ b/docs/README.md.Italian.md @@ -117,7 +117,7 @@ python -m pip install -r request_llm/requirements_moss.txt git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Si prega di notare che quando si esegue questa riga di codice, si deve essere nella directory radice del progetto # 【Passaggio facoltativo III】 Assicurati che il file di configurazione config.py includa tutti i modelli desiderati, al momento tutti i modelli supportati sono i seguenti (i modelli della serie jittorllms attualmente supportano solo la soluzione docker): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

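The `tokenizer` and `token_cnt` fields registered with each entry are what enforce the `max_token` budgets above (16385 for gpt-3.5-turbo-1106, 128000 for gpt-4-1106-preview). A self-contained sketch of that counting step, assuming the cl100k_base encoding used by the gpt-3.5/gpt-4 tokenizer families:

```python
import tiktoken

# cl100k_base is the encoding behind the gpt-3.5-turbo / gpt-4 families.
enc = tiktoken.get_encoding("cl100k_base")

def get_token_num(txt: str) -> int:
    # Mirrors the token_cnt hooks in model_info: measure the prompt first,
    # then clip history so input plus completion stays under max_token.
    return len(enc.encode(txt, disallowed_special=()))

print(get_token_num("hello world"))  # => a small positive integer
```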
From 6c3405ba550c9e846643ce78b51675310ecfbb2a Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:21:52 +0800 Subject: [PATCH 08/31] Update README.md.Korean.md --- docs/README.md.Korean.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md.Korean.md b/docs/README.md.Korean.md index 61b8e4a0..7bdcb8b9 100644 --- a/docs/README.md.Korean.md +++ b/docs/README.md.Korean.md @@ -112,7 +112,7 @@ git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # 다음 코 # [선택 사항III] AVAIL_LLM_MODELS config.py 구성 파일에 기대하는 모델이 포함되어 있는지 확인하십시오. # 현재 지원되는 전체 모델 : -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

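Several patches further down (notably patch 15's `MULTI_QUERY_LLM_MODELS`) rely on the convention that an `llm_model` string may name several backends joined by `&`. A minimal sketch of that fan-out convention; the real plugin runs one request thread per name:

```python
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"  # default from patch 15

def fan_out(llm_model_spec: str) -> list:
    # Any number of backends can be queried at once, '&'-separated,
    # e.g. "gpt-3.5-turbo&chatglm3&azure-gpt-4".
    return [m for m in llm_model_spec.split("&") if m]

assert fan_out(MULTI_QUERY_LLM_MODELS) == ["gpt-3.5-turbo", "chatglm3"]
```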
From cd40bf9ae2e9dd047aebc42ab4e40a6ba95bb219 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:22:12 +0800 Subject: [PATCH 09/31] Update README.md.Portuguese.md --- docs/README.md.Portuguese.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md.Portuguese.md b/docs/README.md.Portuguese.md index 2347d5a7..6c368f75 100644 --- a/docs/README.md.Portuguese.md +++ b/docs/README.md.Portuguese.md @@ -127,7 +127,7 @@ python -m pip install -r request_llm/requirements_moss.txt git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When executing this line of code, you must be in the project root path # 【Optional Step III】Make sure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports docker solutions): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

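The `api2d-*` names kept in these lists are thin aliases of the matching OpenAI entries, a duplication that patch 27 below replaces with a loop over `AVAIL_LLM_MODELS`. A sketch of that alignment under abbreviated data; unlike the patch's loop, it copies the base entry before swapping the endpoint, so the OpenAI entry is left untouched:

```python
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
AVAIL_LLM_MODELS = ["gpt-4", "api2d-gpt-4"]  # abbreviated for the sketch
model_info = {"gpt-4": {"endpoint": "https://api.openai.com/v1/chat/completions",
                        "max_token": 8192}}

# For every requested api2d-* model whose base name is already registered,
# clone the base entry and change only the endpoint (cf. patch 27's loop).
for model in AVAIL_LLM_MODELS:
    base = model.replace("api2d-", "")
    if model.startswith("api2d-") and base in model_info:
        mi = dict(model_info[base])      # copy, so the base entry keeps
        mi["endpoint"] = api2d_endpoint  # its original OpenAI endpoint
        model_info[model] = mi

assert model_info["gpt-4"]["endpoint"].startswith("https://api.openai.com")
```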
From 77220002e0faad0627f09fa40320c9c70a68269a Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:22:29 +0800 Subject: [PATCH 10/31] Update README_EN.md --- docs/README_EN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README_EN.md b/docs/README_EN.md index 02b8588c..1a68810a 100644 --- a/docs/README_EN.md +++ b/docs/README_EN.md @@ -114,7 +114,7 @@ python -m pip install -r request_llm/requirements_moss.txt git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # When executing this line of code, you must be in the root directory of the project # [Optional Step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file includes the expected models. Currently supported models are as follows (the jittorllms series only supports the docker solution for the time being): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

@@ -319,4 +319,4 @@ https://github.com/kaixindelele/ChatPaper # More: https://github.com/gradio-app/gradio https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file +``` From 3141cd392a881edf37644acbd61b87d728fc1a16 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:22:46 +0800 Subject: [PATCH 11/31] Update README_FR.md --- docs/README_FR.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README_FR.md b/docs/README_FR.md index af3bb42c..4df3c840 100644 --- a/docs/README_FR.md +++ b/docs/README_FR.md @@ -119,7 +119,7 @@ python -m pip install -r request_llm/requirements_moss.txt git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note: When running this line of code, you must be in the project root path. # 【Optional Step III】Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the desired model. Currently, all models supported are as follows (the jittorllms series currently only supports the docker scheme): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

@@ -320,4 +320,4 @@ https://github.com/kaixindelele/ChatPaper # Plus : https://github.com/gradio-app/gradio https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file +``` From 6c795809f76826fda9da5630133d816c46dee856 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:23:01 +0800 Subject: [PATCH 12/31] Update README_JP.md --- docs/README_JP.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README_JP.md b/docs/README_JP.md index 46145e1f..3b03545a 100644 --- a/docs/README_JP.md +++ b/docs/README_JP.md @@ -128,7 +128,7 @@ python -m pip install -r request_llm/requirements_moss.txt git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, it must be in the project root. # 【Optional Step III】Ensure that the AVAIL_LLM_MODELS in the config.py configuration file contains the expected model. Currently, all supported models are as follows (jittorllms series currently only supports the docker solution): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

@@ -326,4 +326,4 @@ https://github.com/kaixindelele/ChatPaper # その他: https://github.com/gradio-app/gradio https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file +``` From 3a2466fe4ea6281f24affdccf22d3321b7ec7d52 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:23:16 +0800 Subject: [PATCH 13/31] Update README_RS.md --- docs/README_RS.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README_RS.md b/docs/README_RS.md index d4888a05..e050d442 100644 --- a/docs/README_RS.md +++ b/docs/README_RS.md @@ -116,7 +116,7 @@ python -m pip install -r request_llm/requirements_moss.txt git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, you must be in the project root path # [Optional step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently, all supported models are as follows (the jittorllms series currently only supports the docker solution): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] ```

@@ -275,4 +275,4 @@ https://github.com/kaixindelele/ChatPaper # Больше: https://github.com/gradio-app/gradio https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file +``` From ffe6c1403e2d7f4a07434da3af07de5f21134973 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:25:36 +0800 Subject: [PATCH 14/31] Update bridge_chatgpt.py --- request_llm/bridge_chatgpt.py | 1 + 1 file changed, 1 insertion(+) diff --git a/request_llm/bridge_chatgpt.py b/request_llm/bridge_chatgpt.py index 9903da9d..292de0ad 100644 --- a/request_llm/bridge_chatgpt.py +++ b/request_llm/bridge_chatgpt.py @@ -351,6 +351,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): model = random.choice([ "gpt-3.5-turbo", "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0301", From 996057e588e54df709fcfbee9f5bacbc5358c049 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 7 Nov 2023 15:41:04 +0800 Subject: [PATCH 15/31] support chatglm3 --- config.py | 6 +++++- crazy_functions/询问多个大语言模型.py | 7 ++++--- main.py | 4 ++-- request_llms/local_llm_class.py | 14 +++++++------- version | 2 +- 5 files changed, 19 insertions(+), 14 deletions(-) diff --git a/config.py b/config.py index 06840dd8..f578aa85 100644 --- a/config.py +++ b/config.py @@ -90,11 +90,15 @@ LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", - "chatglm", "moss", "newbing", "claude-2"] + "chatglm3", "moss", "newbing", "claude-2"] # P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] +# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4" +MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3" + + # 百度千帆(LLM_MODEL="qianfan") BAIDU_CLOUD_API_KEY = '' BAIDU_CLOUD_SECRET_KEY = '' diff --git a/crazy_functions/询问多个大语言模型.py b/crazy_functions/询问多个大语言模型.py index 80e09fcd..4210fb21 100644 --- a/crazy_functions/询问多个大语言模型.py +++ b/crazy_functions/询问多个大语言模型.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, update_ui +from toolbox import CatchException, update_ui, get_conf from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive import datetime @CatchException @@ -13,11 +13,12 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt web_port 当前软件运行的端口号 """ history = [] # 清空历史,以免输入溢出 - chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……")) + MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS') + chatbot.append((txt, "正在同时咨询" + MULTI_QUERY_LLM_MODELS)) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 + llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # 支持任意数量的llm接口,用&符号分隔 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=txt, inputs_show_user=txt, llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, diff --git a/main.py b/main.py index bf843825..a621deb1 100644 --- a/main.py +++ b/main.py @@ -433,7 +433,7 @@ def main(): server_port=PORT, 
favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"), auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None, - blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile","gpt_log/admin"]) + blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"]) # 如果需要在二级路径下运行 # CUSTOM_PATH = get_conf('CUSTOM_PATH') @@ -442,7 +442,7 @@ def main(): # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH) # else: # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png", - # blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"]) + # blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"]) if __name__ == "__main__": main() diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index b6f49ba4..b6ce801e 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -5,18 +5,18 @@ from multiprocessing import Process, Pipe from contextlib import redirect_stdout from request_llms.queued_pipe import create_queue_pipe -class DebugLock(object): +class ThreadLock(object): def __init__(self): self._lock = threading.Lock() def acquire(self): - print("acquiring", self) + # print("acquiring", self) #traceback.print_tb self._lock.acquire() - print("acquired", self) + # print("acquired", self) def release(self): - print("released", self) + # print("released", self) #traceback.print_tb self._lock.release() @@ -85,7 +85,7 @@ class LocalLLMHandle(Process): self.is_main_process = False # state wrap for child process self.start() self.is_main_process = True # state wrap for child process - self.threadLock = DebugLock() + self.threadLock = ThreadLock() def get_state(self): # ⭐run in main process @@ -159,7 +159,7 @@ class LocalLLMHandle(Process): try: for response_full in self.llm_stream_generator(**kwargs): self.child.send(response_full) - print('debug' + response_full) + # print('debug' + response_full) self.child.send('[Finish]') # 请求处理结束,开始下一个循环 except: @@ -200,7 +200,7 @@ class LocalLLMHandle(Process): if res.startswith(self.std_tag): new_output = res[len(self.std_tag):] std_out = std_out[:std_out_clip_len] - print(new_output, end='') + # print(new_output, end='') std_out = new_output + std_out yield self.std_tag + '\n```\n' + std_out + '\n```\n' elif res == '[Finish]': diff --git a/version b/version index 1470eb40..f9db97e5 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { "version": 3.56, "show_feature": true, - "new_feature": "支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验 <-> 新增动态代码解释器(CodeInterpreter) <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! 
<-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率" + "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" } From e9cf3d3d1219b365d813835e985ce57cb6b4217c Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 7 Nov 2023 15:52:08 +0800 Subject: [PATCH 16/31] version 3.57 --- README.md | 5 +++-- version | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 5e1f2d4f..d8b4756c 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ > > `pip install -r requirements.txt` > -> 2023.11.7: 本项目开源免费,**近期发现有人蔑视开源协议,利用本项目违法圈钱**,请各位提高警惕,谨防上当受骗。 +> 2023.11.7: 本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。 @@ -288,7 +288,8 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h ### II:版本: -- version 3.60(todo): 优化虚空终端,引入code interpreter和更多插件 +- version 3.60(todo): 优化虚空终端,并引入AutoGen作为新一代插件的基石 +- version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG - version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面 - version 3.55: 重构前端界面,引入悬浮窗口与菜单栏 - version 3.54: 新增动态代码解释器(Code Interpreter)(待完善) diff --git a/version b/version index f9db97e5..5e4fb7d0 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.56, + "version": 3.57, "show_feature": true, "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" } From 736f1214ee156a80c461cf011156f973bbd6cb56 Mon Sep 17 00:00:00 2001 From: Skyzayre <120616113+Skyzayre@users.noreply.github.com> Date: Tue, 7 Nov 2023 15:55:23 +0800 Subject: [PATCH 17/31] Update bridge_all.py --- request_llm/bridge_all.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py index 2c535378..a1dd7f6c 100644 --- a/request_llm/bridge_all.py +++ b/request_llm/bridge_all.py @@ -112,7 +112,7 @@ model_info = { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, "endpoint": openai_endpoint, - "max_token": 1024 *16, + "max_token": 16385, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, @@ -148,7 +148,7 @@ model_info = { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, "endpoint": openai_endpoint, - "max_token": 1024 * 128, + "max_token": 128000, "tokenizer": tokenizer_gpt4, "token_cnt": get_token_num_gpt4, }, From 8d94564e675ac492068ffb9884a2df217bf7bf37 Mon Sep 17 00:00:00 2001 From: awwaawwa <8493196+awwaawwa@users.noreply.github.com> Date: Tue, 7 Nov 2023 15:59:07 +0800 Subject: [PATCH 18/31] =?UTF-8?q?=E4=BF=AE=E6=94=B9=20gpt-3.5-turbo-16k=20?= =?UTF-8?q?=E7=B3=BB=E5=88=97=E6=A8=A1=E5=9E=8B=20max=5Ftoken=20=E4=B8=BA?= =?UTF-8?q?=2016385?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 根据 https://platform.openai.com/docs/models/gpt-3-5 ,这个16k的3.5上下文窗口其实是16385 --- request_llms/bridge_all.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 27b91c26..69a99e9b 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -94,7 +94,7 @@ model_info = { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, "endpoint": openai_endpoint, - "max_token": 1024*16, + "max_token": 16385, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, @@ -112,7 +112,7 @@ model_info = { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, "endpoint": openai_endpoint, - "max_token": 1024 * 16, + "max_token": 16385, "tokenizer": tokenizer_gpt35, "token_cnt": 
get_token_num_gpt35, }, @@ -186,7 +186,7 @@ model_info = { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, "endpoint": api2d_endpoint, - "max_token": 1024*16, + "max_token": 16385, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, From 12df41563a3446a8ca284b0837949d14a9025806 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 8 Nov 2023 18:40:36 +0800 Subject: [PATCH 19/31] hide audio btn border --- main.py | 2 +- themes/default.css | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/main.py b/main.py index a621deb1..89ca7811 100644 --- a/main.py +++ b/main.py @@ -94,7 +94,7 @@ def main(): clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm") if ENABLE_AUDIO: with gr.Row(): - audio_mic = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False).style(container=False) + audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False) with gr.Row(): status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel") with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn: diff --git a/themes/default.css b/themes/default.css index 65d5940b..7c1d400f 100644 --- a/themes/default.css +++ b/themes/default.css @@ -1,3 +1,8 @@ +/* 插件下拉菜单 */ +#elem_audio { + border-style: hidden !important; +} + .dark { --background-fill-primary: #050810; --body-background-fill: var(--background-fill-primary); From 0ff750b60a645c739ab09173a86d0ab9b1482483 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 12:40:25 +0800 Subject: [PATCH 20/31] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E7=BC=A9=E8=BF=9B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 3 ++- request_llms/bridge_all.py | 12 ++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/config.py b/config.py index f44c47dd..38d05198 100644 --- a/config.py +++ b/config.py @@ -87,7 +87,8 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", +AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview", + "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm3", "moss", "newbing", "claude-2"] diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 6d34d951..3a93234e 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -145,12 +145,12 @@ model_info = { }, "gpt-4-1106-preview": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": openai_endpoint, - "max_token": 128000, - "tokenizer": tokenizer_gpt4, - "token_cnt": get_token_num_gpt4, + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 128000, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, }, "gpt-3.5-random": { From a1a91c25a5ac2e3928e7dd17b21f70e7694cfca6 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 12:53:03 +0800 Subject: [PATCH 21/31] =?UTF-8?q?=E7=A7=BB=E9=99=A4=E9=87=8D=E5=A4=8D?= =?UTF-8?q?=E9=A1=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- request_llms/bridge_all.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index cb71843e..3a93234e 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -108,15 +108,6 @@ model_info = { "token_cnt": get_token_num_gpt35, }, - "gpt-3.5-turbo-1106": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": openai_endpoint, - "max_token": 16385, - "tokenizer": tokenizer_gpt35, - "token_cnt": get_token_num_gpt35, - }, - "gpt-3.5-turbo-16k-0613": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, From 33bf795c663587ec86d5a852a14a7560b1af09a5 Mon Sep 17 00:00:00 2001 From: xiangsam Date: Fri, 10 Nov 2023 11:45:47 +0000 Subject: [PATCH 22/31] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E7=B2=BE=E5=87=86?= =?UTF-8?q?=E7=BF=BB=E8=AF=91PDF=E6=96=87=E6=A1=A3(NOUGAT)=E6=8F=92?= =?UTF-8?q?=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/crazy_utils.py | 2 +- crazy_functions/批量翻译PDF文档_NOUGAT.py | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index a23c732b..ce7a2e39 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -748,7 +748,7 @@ class nougat_interface(): yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)", chatbot=chatbot, history=history, delay=0) - self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600) + self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}" --recompute --no-skipping --markdown --batchsize 8', os.getcwd(), timeout=3600) res = glob.glob(os.path.join(dst,'*.mmd')) if len(res) == 0: self.threadLock.release() diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py index 3e50c93a..50e34c4e 100644 --- a/crazy_functions/批量翻译PDF文档_NOUGAT.py +++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py @@ -73,6 +73,11 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst from .crazy_utils import get_files_from_everything success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf') + success_mmd, file_manifest_mmd, _ = get_files_from_everything(txt, type='.mmd') + success = success or success_mmd + file_manifest += file_manifest_mmd + chatbot.append(["文件列表:", ", ".join([e.split('/')[-1] for e in file_manifest])]); + yield from update_ui( chatbot=chatbot, history=history) # 检测输入参数,如没有给定输入参数,直接退出 if not success: if txt == "": txt = '空空如也的输入栏' @@ -101,9 +106,13 @@ def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwa from crazy_functions.pdf_fns.report_gen_html import construct_html nougat_handle = nougat_interface() for index, fp in enumerate(file_manifest): - chatbot.append(["当前进度:", f"正在解析论文,请稍候。(第一次运行时,需要花费较长时间下载NOUGAT参数)"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - fpp = yield from nougat_handle.NOUGAT_parse_pdf(fp, chatbot, history) - promote_file_to_downloadzone(fpp, rename_file=os.path.basename(fpp)+'.nougat.mmd', chatbot=chatbot) + if fp.endswith('pdf'): + chatbot.append(["当前进度:", f"正在解析论文,请稍候。(第一次运行时,需要花费较长时间下载NOUGAT参数)"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + fpp = yield from nougat_handle.NOUGAT_parse_pdf(fp, chatbot, history) + promote_file_to_downloadzone(fpp, 
rename_file=os.path.basename(fpp)+'.nougat.mmd', chatbot=chatbot) + else: + chatbot.append(["当前论文无需解析:", fp]); yield from update_ui( chatbot=chatbot, history=history) + fpp = fp with open(fpp, 'r', encoding='utf8') as f: article_content = f.readlines() article_dict = markdown_to_dict(article_content) From 0299b0f95f6c264ca4dbf8809fb1e09c00702ec4 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 20:59:08 +0800 Subject: [PATCH 23/31] =?UTF-8?q?=E6=94=AF=E6=8C=81DALLE3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 17 +++++++++++--- crazy_functions/图片生成.py | 31 ++++++++++++++++++++++++-- docs/translate_english.json | 2 +- docs/translate_japanese.json | 2 +- docs/translate_traditionalchinese.json | 2 +- 5 files changed, 46 insertions(+), 8 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 2d7fa74b..60c85691 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -349,18 +349,29 @@ def get_crazy_functions(): print('Load function plugin failed') try: - from crazy_functions.图片生成 import 图片生成 + from crazy_functions.图片生成 import 图片生成, 图片生成_DALLE3 function_plugins.update({ "图片生成(先切换模型到openai或api2d)": { "Group": "对话", "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示 - "Info": "图片生成 | 输入参数字符串,提供图像的内容", + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认)", # 高级参数输入区的显示提示 + "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容", "Function": HotReload(图片生成) }, }) + function_plugins.update({ + "图片生成_DALLE3(先切换模型到openai或api2d)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认)", # 高级参数输入区的显示提示 + "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容", + "Function": HotReload(图片生成_DALLE3) + }, + }) except: print('Load function plugin failed') diff --git a/crazy_functions/图片生成.py b/crazy_functions/图片生成.py index 1b7dff5d..95b44813 100644 --- a/crazy_functions/图片生成.py +++ b/crazy_functions/图片生成.py @@ -3,7 +3,7 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive import datetime -def gen_image(llm_kwargs, prompt, resolution="256x256"): +def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2"): import requests, json, time, os from request_llms.bridge_all import model_info @@ -23,6 +23,7 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"): 'prompt': prompt, 'n': 1, 'size': resolution, + 'model': model, 'response_format': 'url' } response = requests.post(url, headers=headers, json=data, proxies=proxies) @@ -58,7 +59,7 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 .....")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - resolution = plugin_kwargs.get("advanced_arg", '256x256') + resolution = plugin_kwargs.get("advanced_arg", '1024x1024') image_url, image_path = gen_image(llm_kwargs, prompt, resolution) chatbot.append([prompt, f'图像中转网址:
<br/>`{image_url}`<br/>'+
@@ -67,3 +68,29 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
                     f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' ]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + +@CatchException +def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + """ + txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 + llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 + plugin_kwargs 插件模型的参数,暂时没有用武之地 + chatbot 聊天显示框的句柄,用于显示给用户 + history 聊天历史,前情提要 + system_prompt 给gpt的静默提醒 + web_port 当前软件运行的端口号 + """ + history = [] # 清空历史,以免输入溢出 + chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 .....")) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") + resolution = plugin_kwargs.get("advanced_arg", '1024x1024') + image_url, image_path = gen_image(llm_kwargs, prompt, resolution) + chatbot.append([prompt, + f'图像中转网址:
<br/>`{image_url}`<br/>'+
+                    f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
+                    f'本地文件地址: <br/>`{image_path}`<br/>'+
+                    f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' + ]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + diff --git a/docs/translate_english.json b/docs/translate_english.json index 850cae54..44361f02 100644 --- a/docs/translate_english.json +++ b/docs/translate_english.json @@ -265,7 +265,7 @@ "例如chatglm&gpt-3.5-turbo&api2d-gpt-4": "e.g. chatglm&gpt-3.5-turbo&api2d-gpt-4", "先切换模型到openai或api2d": "Switch the model to openai or api2d first", "在这里输入分辨率": "Enter the resolution here", - "如256x256": "e.g. 256x256", + "如1024x1024": "e.g. 1024x1024", "默认": "Default", "建议您复制一个config_private.py放自己的秘密": "We suggest you to copy a config_private.py file to keep your secrets, such as API and proxy URLs, from being accidentally uploaded to Github and seen by others.", "如API和代理网址": "Such as API and proxy URLs", diff --git a/docs/translate_japanese.json b/docs/translate_japanese.json index ae86dc06..29ebcc96 100644 --- a/docs/translate_japanese.json +++ b/docs/translate_japanese.json @@ -854,7 +854,7 @@ "查询版本和用户意见": "バージョンとユーザーの意見を検索する", "提取摘要": "要約を抽出する", "在gpt输出代码的中途": "GPTがコードを出力する途中で", - "如256x256": "256x256のように", + "如1024x1024": "1024x1024のように", "概括其内容": "内容を要約する", "剩下的情况都开头除去": "残りの場合はすべて先頭を除去する", "至少一个线程任务意外失败": "少なくとも1つのスレッドタスクが予期しない失敗をした", diff --git a/docs/translate_traditionalchinese.json b/docs/translate_traditionalchinese.json index a677f108..b75cbdb4 100644 --- a/docs/translate_traditionalchinese.json +++ b/docs/translate_traditionalchinese.json @@ -1147,7 +1147,7 @@ "Y+回车=确认": "Y+回車=確認", "正在同时咨询ChatGPT和ChatGLM……": "正在同時諮詢ChatGPT和ChatGLM……", "根据 heuristic 规则": "根據heuristic規則", - "如256x256": "如256x256", + "如1024x1024": "如1024x1024", "函数插件区": "函數插件區", "*** API_KEY 导入成功": "*** API_KEY 導入成功", "请对下面的程序文件做一个概述文件名是": "請對下面的程序文件做一個概述文件名是", From f9fc02948ac360e2906214e274a5ebd4770e8a28 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 21:04:21 +0800 Subject: [PATCH 24/31] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E5=88=86=E8=BE=A8?= =?UTF-8?q?=E7=8E=87=E6=8F=90=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 60c85691..e82f3995 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -356,7 +356,7 @@ def get_crazy_functions(): "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认)", # 高级参数输入区的显示提示 + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示 "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容", "Function": HotReload(图片生成) }, @@ -367,7 +367,7 @@ def get_crazy_functions(): "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) - "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认)", # 高级参数输入区的显示提示 + "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容", "Function": HotReload(图片生成_DALLE3) }, From 362b545a45352b011adef023e54f9c34a8110fdf Mon Sep 17 00:00:00 2001 From: xiangsam Date: Fri, 10 Nov 2023 14:25:37 +0000 Subject: [PATCH 25/31] =?UTF-8?q?=E6=9B=B4=E6=94=B9import=20nougat?= =?UTF-8?q?=E6=97=B6=E6=9C=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/批量翻译PDF文档_NOUGAT.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py index 
50e34c4e..16dfd6bf 100644 --- a/crazy_functions/批量翻译PDF文档_NOUGAT.py +++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py @@ -57,22 +57,22 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst "批量翻译PDF文档。函数插件贡献者: Binary-Husky"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import nougat - import tiktoken - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - # 清空历史,以免输入溢出 history = [] from .crazy_utils import get_files_from_everything success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf') + if len(file_manifest) > 0: + # 尝试导入依赖,如果缺少依赖,则给出安装建议 + try: + import nougat + import tiktoken + except: + report_execption(chatbot, history, + a=f"解析项目: {txt}", + b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return success_mmd, file_manifest_mmd, _ = get_files_from_everything(txt, type='.mmd') success = success or success_mmd file_manifest += file_manifest_mmd From da7c03e868b89f71b52444a0565ae4d08e50293a Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 22:54:55 +0800 Subject: [PATCH 26/31] =?UTF-8?q?=E5=9B=BE=E5=83=8F=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 17 ++- .../multi_stage/multi_stage_utils.py | 45 +++++++ crazy_functions/图片生成.py | 125 ++++++++++++++++-- toolbox.py | 7 +- 4 files changed, 176 insertions(+), 18 deletions(-) create mode 100644 crazy_functions/multi_stage/multi_stage_utils.py diff --git a/crazy_functional.py b/crazy_functional.py index e82f3995..2e94570c 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -349,16 +349,16 @@ def get_crazy_functions(): print('Load function plugin failed') try: - from crazy_functions.图片生成 import 图片生成, 图片生成_DALLE3 + from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2 function_plugins.update({ - "图片生成(先切换模型到openai或api2d)": { + "图片生成_DALLE2(先切换模型到openai或api2d)": { "Group": "对话", "Color": "stop", "AsButton": False, "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示 "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容", - "Function": HotReload(图片生成) + "Function": HotReload(图片生成_DALLE2) }, }) function_plugins.update({ @@ -372,6 +372,17 @@ def get_crazy_functions(): "Function": HotReload(图片生成_DALLE3) }, }) + # function_plugins.update({ + # "图片修改_DALLE2(启动DALLE2图像修改向导程序)": { + # "Group": "对话", + # "Color": "stop", + # "AsButton": False, + # "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False) + # "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792", # 高级参数输入区的显示提示 + # # "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容", + # "Function": HotReload(图片修改_DALLE2) + # }, + # }) except: print('Load function plugin failed') diff --git a/crazy_functions/multi_stage/multi_stage_utils.py b/crazy_functions/multi_stage/multi_stage_utils.py new file mode 100644 index 00000000..60f07783 --- /dev/null +++ b/crazy_functions/multi_stage/multi_stage_utils.py @@ -0,0 +1,45 @@ +from pydantic import BaseModel, Field +from typing import List +from toolbox import update_ui_lastest_msg, disable_auto_promotion +from request_llms.bridge_all import predict_no_ui_long_connection +from 
crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError +import time +import pickle + +def have_any_recent_upload_files(chatbot): + _5min = 5 * 60 + if not chatbot: return False # chatbot is None + most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None) + if not most_recent_uploaded: return False # most_recent_uploaded is None + if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new + else: return False # most_recent_uploaded is too old + +class GptAcademicState(): + def __init__(self): + self.reset() + + def reset(self): + pass + + def lock_plugin(self, chatbot): + chatbot._cookies['plugin_state'] = pickle.dumps(self) + + def unlock_plugin(self, chatbot): + self.reset() + chatbot._cookies['plugin_state'] = pickle.dumps(self) + + def set_state(self, chatbot, key, value): + setattr(self, key, value) + chatbot._cookies['plugin_state'] = pickle.dumps(self) + + def get_state(chatbot, cls=None): + state = chatbot._cookies.get('plugin_state', None) + if state is not None: state = pickle.loads(state) + elif cls is not None: state = cls() + else: state = GptAcademicState() + state.chatbot = chatbot + return state + +class GatherMaterials(): + def __init__(self, materials) -> None: + materials = ['image', 'prompt'] \ No newline at end of file diff --git a/crazy_functions/图片生成.py b/crazy_functions/图片生成.py index 95b44813..4968361a 100644 --- a/crazy_functions/图片生成.py +++ b/crazy_functions/图片生成.py @@ -1,6 +1,5 @@ from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime +from crazy_functions.multi_stage.multi_stage_utils import GptAcademicState def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2"): @@ -43,9 +42,48 @@ def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2"): return image_url, file_path+file_name +def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="dall-e-2"): + import requests, json, time, os + from request_llms.bridge_all import model_info + + proxies = get_conf('proxies') + api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) + chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] + # 'https://api.openai.com/v1/chat/completions' + img_endpoint = chat_endpoint.replace('chat/completions','images/edits') + # # Generate the image + url = img_endpoint + headers = { + 'Authorization': f"Bearer {api_key}", + 'Content-Type': 'application/json' + } + data = { + 'image': open(image_path, 'rb'), + 'prompt': prompt, + 'n': 1, + 'size': resolution, + 'model': model, + 'response_format': 'url' + } + response = requests.post(url, headers=headers, json=data, proxies=proxies) + print(response.content) + try: + image_url = json.loads(response.content.decode('utf8'))['data'][0]['url'] + except: + raise RuntimeError(response.content.decode()) + # 文件保存到本地 + r = requests.get(image_url, proxies=proxies) + file_path = f'{get_log_folder()}/image_gen/' + os.makedirs(file_path, exist_ok=True) + file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png' + with open(file_path+file_name, 'wb+') as f: f.write(r.content) + + + return image_url, file_path+file_name + @CatchException -def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): +def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): """ txt 
输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 @@ -69,17 +107,9 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro ]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + @CatchException def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ history = [] # 清空历史,以免输入溢出 chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 .....")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 @@ -94,3 +124,74 @@ def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys ]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + +class ImageEditState(GptAcademicState): + def get_image_file(self, x): + import os, glob + if len(x) == 0: return False, None + if not os.path.exists(x): return False, None + if x.endswith('.png'): return True, x + file_manifest = [f for f in glob.glob(f'{x}/**/*.png', recursive=True)] + confirm = (len(file_manifest) >= 1 and file_manifest[0].endswith('.png') and os.path.exists(file_manifest[0])) + file = None if not confirm else file_manifest[0] + return confirm, file + + def get_resolution(self, x): + return (x in ['256x256', '512x512', '1024x1024']), x + + def get_prompt(self, x): + confirm = (len(x)>=5) and (not self.get_resolution(x)[0]) and (not self.get_image_file(x)[0]) + return confirm, x + + def reset(self): + self.req = [ + {'value':None, 'description': '请先上传图像(必须是.png格式), 然后再次点击本插件', 'verify_fn': self.get_image_file}, + {'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024', 'verify_fn': self.get_resolution}, + {'value':None, 'description': '请输入修改需求,建议您使用英文提示词', 'verify_fn': self.get_prompt}, + ] + self.info = "" + + def feed(self, prompt, chatbot): + for r in self.req: + if r['value'] is None: + confirm, res = r['verify_fn'](prompt) + if confirm: + r['value'] = res + self.set_state(chatbot, 'dummy_key', 'dummy_value') + break + return self + + def next_req(self): + for r in self.req: + if r['value'] is None: + return r['description'] + return "已经收集到所有信息" + + def already_obtained_all_materials(self): + return all([x['value'] is not None for x in self.req]) + +@CatchException +def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + history = [] # 清空历史 + state = ImageEditState.get_state(chatbot, ImageEditState) + state = state.feed(prompt, chatbot) + if not state.already_obtained_all_materials(): + chatbot.append(["图片修改(先上传图片,再输入修改需求,最后输入分辨率)", state.next_req()]) + yield from update_ui(chatbot=chatbot, history=history) + return + + image_path = state.req[0] + resolution = state.req[1] + prompt = state.req[2] + chatbot.append(["图片修改, 执行中", f"图片:`{image_path}`
<br/>分辨率:`{resolution}`<br/>修改需求:`{prompt}`"])
+    yield from update_ui(chatbot=chatbot, history=history)
+
+    image_url, image_path = edit_image(llm_kwargs, prompt, image_path, resolution)
+    chatbot.append([prompt,
+                    f'图像中转网址: <br/>`{image_url}`<br/>'+
+                    f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
+                    f'本地文件地址: <br/>`{image_path}`<br/>'+
+                    f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' + ]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + diff --git a/toolbox.py b/toolbox.py index 8c6e7fae..b1e1ce7b 100644 --- a/toolbox.py +++ b/toolbox.py @@ -625,13 +625,14 @@ def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkbo def on_report_generated(cookies, files, chatbot): - from toolbox import find_recent_files - PATH_LOGGING = get_conf('PATH_LOGGING') + # from toolbox import find_recent_files + # PATH_LOGGING = get_conf('PATH_LOGGING') if 'files_to_promote' in cookies: report_files = cookies['files_to_promote'] cookies.pop('files_to_promote') else: - report_files = find_recent_files(PATH_LOGGING) + report_files = [] + # report_files = find_recent_files(PATH_LOGGING) if len(report_files) == 0: return cookies, None, chatbot # files.extend(report_files) From 107ea868e15eac1687fc18249f55f09b4fdff207 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 10 Nov 2023 23:08:56 +0800 Subject: [PATCH 27/31] =?UTF-8?q?API2D=E8=87=AA=E5=8A=A8=E5=AF=B9=E9=BD=90?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 4 ++-- request_llms/bridge_all.py | 22 ++++++++++------------ 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/config.py b/config.py index 38d05198..611b1589 100644 --- a/config.py +++ b/config.py @@ -89,8 +89,8 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", - "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', - "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", + "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', + "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm3", "moss", "newbing", "claude-2"] # P.S. 
其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 5da44b79..139d3ae9 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -177,11 +177,11 @@ model_info = { "fn_without_ui": chatgpt_noui, "endpoint": azure_endpoint, "max_token": 8192, - "tokenizer": tokenizer_gpt35, - "token_cnt": get_token_num_gpt35, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, }, - # api_2d + # api_2d (此后不需要在此处添加api2d的接口了,因为下面的代码会自动添加) "api2d-gpt-3.5-turbo": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, @@ -200,15 +200,6 @@ model_info = { "token_cnt": get_token_num_gpt4, }, - "api2d-gpt-3.5-turbo-16k": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": api2d_endpoint, - "max_token": 16385, - "tokenizer": tokenizer_gpt35, - "token_cnt": get_token_num_gpt35, - }, - # 将 chatglm 直接对齐到 chatglm2 "chatglm": { "fn_with_ui": chatglm_ui, @@ -244,6 +235,13 @@ model_info = { }, } +# -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=- +for model in AVAIL_LLM_MODELS: + if model.startswith('api2d-') and (model.replace('api2d-','') in model_info.keys()): + mi = model_info[model.replace('api2d-','')] + mi.update({"endpoint": api2d_endpoint}) + model_info.update({model: mi}) + # -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=- if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS: from .bridge_claude import predict_no_ui_long_connection as claude_noui From 2b917edf26502b2e3c1e81794093f18839cbc42e Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 17:58:17 +0800 Subject: [PATCH 28/31] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=9C=AC=E5=9C=B0?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E5=9C=A8windows=E4=B8=8A=E7=9A=84=E5=85=BC?= =?UTF-8?q?=E5=AE=B9=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_chatglm.py | 3 +-- request_llms/bridge_chatglm3.py | 3 +-- request_llms/bridge_chatglmonnx.py | 3 +-- request_llms/bridge_internlm.py | 3 +-- request_llms/bridge_llama2.py | 3 +-- request_llms/bridge_qwen.py | 3 +-- request_llms/local_llm_class.py | 7 ++++--- 7 files changed, 10 insertions(+), 15 deletions(-) diff --git a/request_llms/bridge_chatglm.py b/request_llms/bridge_chatglm.py index 16e1d8fc..83c50da1 100644 --- a/request_llms/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModel, AutoTokenizer from toolbox import get_conf, ProxyNetworkActivate -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetGLM2Handle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_chatglm3.py b/request_llms/bridge_chatglm3.py index 461c3064..44656608 100644 --- a/request_llms/bridge_chatglm3.py +++ b/request_llms/bridge_chatglm3.py @@ -4,14 +4,13 @@ cmd_to_install = "`pip 
install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModel, AutoTokenizer from toolbox import get_conf, ProxyNetworkActivate -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetGLM3Handle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_chatglmonnx.py b/request_llms/bridge_chatglmonnx.py index 312c6846..4b905718 100644 --- a/request_llms/bridge_chatglmonnx.py +++ b/request_llms/bridge_chatglmonnx.py @@ -8,7 +8,7 @@ import threading import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from .chatglmoonx import ChatGLMModel, chat_template @@ -17,7 +17,6 @@ from .chatglmoonx import ChatGLMModel, chat_template # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index 073c193a..b831dc59 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -7,7 +7,7 @@ import threading import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ @@ -34,7 +34,6 @@ def combine_history(prompt, hist): # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetInternlmHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_llama2.py b/request_llms/bridge_llama2.py index bc8ef7eb..e6da4b75 100644 --- a/request_llms/bridge_llama2.py +++ b/request_llms/bridge_llama2.py @@ -5,14 +5,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from threading import Thread # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ 
-@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index 62682cfa..29168f6d 100644 --- a/request_llms/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -8,14 +8,13 @@ import threading import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index b6ce801e..fe6be961 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -76,7 +76,6 @@ class LocalLLMHandle(Process): self.parent_state, self.child_state = create_queue_pipe() # allow redirect_stdout self.std_tag = "[Subprocess Message] " - self.child.write = lambda x: self.child.send(self.std_tag + x) self.running = True self._model = None self._tokenizer = None @@ -137,6 +136,8 @@ class LocalLLMHandle(Process): def run(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process # 第一次运行,加载参数 + self.child.flush = lambda *args: None + self.child.write = lambda x: self.child.send(self.std_tag + x) reset_tqdm_output() self.set_state("`尝试加载模型`") try: @@ -220,7 +221,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla """ refer to request_llms/bridge_all.py """ - _llm_handle = LLMSingletonClass() + _llm_handle = SingletonLocalLLM(LLMSingletonClass)() if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.get_state() if not _llm_handle.running: @@ -268,7 +269,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla """ chatbot.append((inputs, "")) - _llm_handle = LLMSingletonClass() + _llm_handle = SingletonLocalLLM(LLMSingletonClass)() chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state()) yield from update_ui(chatbot=chatbot, history=[]) if not _llm_handle.running: From 2570e4b99705777bb218f3db2dce42b6ce7c7970 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 18:17:58 +0800 Subject: [PATCH 29/31] remove revision --- request_llms/bridge_qwen.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index 29168f6d..0b226df7 100644 --- a/request_llms/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -30,10 +30,9 @@ class GetONNXGLMHandle(LocalLLMHandle): from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig model_id = 'qwen/Qwen-7B-Chat' - revision = 'v1.0.1' - self._tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True) + self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True) # use fp16 - model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", revision=revision, trust_remote_code=True, fp16=True).eval() + model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() model.generation_config = 
GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 self._model = model From e4409b94d1c82bf8f9dabb1696f12fee64f348a9 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 18:30:57 +0800 Subject: [PATCH 30/31] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E6=8B=BC=E5=86=99=20re?= =?UTF-8?q?port=5Fexecption=20->=20report=5Fexception=20#1220?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/Latex全文润色.py | 20 ++++----- crazy_functions/Latex全文翻译.py | 14 +++---- crazy_functions/Latex输出PDF结果.py | 12 +++--- crazy_functions/agent_fns/auto_agent.py | 2 +- crazy_functions/agent_fns/general.py | 2 +- crazy_functions/下载arxiv论文翻译摘要.py | 6 +-- crazy_functions/总结word文档.py | 8 ++-- crazy_functions/总结音视频.py | 8 ++-- crazy_functions/批量Markdown翻译.py | 20 ++++----- crazy_functions/批量总结PDF文档.py | 8 ++-- crazy_functions/批量总结PDF文档pdfminer.py | 8 ++-- crazy_functions/批量翻译PDF文档_NOUGAT.py | 6 +-- crazy_functions/批量翻译PDF文档_多线程.py | 6 +-- crazy_functions/理解PDF文档内容.py | 8 ++-- crazy_functions/生成函数注释.py | 6 +-- crazy_functions/解析JupyterNotebook.py | 6 +-- crazy_functions/解析项目源代码.py | 48 +++++++++++----------- crazy_functions/读文章写摘要.py | 6 +-- crazy_functions/谷歌检索小助手.py | 6 +-- docs/self_analysis.md | 2 +- toolbox.py | 4 +- 21 files changed, 103 insertions(+), 103 deletions(-) diff --git a/crazy_functions/Latex全文润色.py b/crazy_functions/Latex全文润色.py index 268a3446..0bc7d401 100644 --- a/crazy_functions/Latex全文润色.py +++ b/crazy_functions/Latex全文润色.py @@ -1,5 +1,5 @@ from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder -from toolbox import CatchException, report_execption, write_history_to_file, zip_folder +from toolbox import CatchException, report_exception, write_history_to_file, zip_folder class PaperFileGroup(): @@ -146,7 +146,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -157,12 +157,12 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en') @@ -184,7 +184,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -195,12 +195,12 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt 
== "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh') @@ -220,7 +220,7 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -231,12 +231,12 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread') diff --git a/crazy_functions/Latex全文翻译.py b/crazy_functions/Latex全文翻译.py index 697f5ac8..846bd80d 100644 --- a/crazy_functions/Latex全文翻译.py +++ b/crazy_functions/Latex全文翻译.py @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, write_history_to_file +from toolbox import CatchException, report_exception, write_history_to_file fast_debug = False class PaperFileGroup(): @@ -117,7 +117,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -128,12 +128,12 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, 
llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh') @@ -154,7 +154,7 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -165,12 +165,12 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') \ No newline at end of file diff --git a/crazy_functions/Latex输出PDF结果.py b/crazy_functions/Latex输出PDF结果.py index 9edfea68..a2545ddd 100644 --- a/crazy_functions/Latex输出PDF结果.py +++ b/crazy_functions/Latex输出PDF结果.py @@ -1,5 +1,5 @@ from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str +from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str from functools import partial import glob, os, requests, time pj = os.path.join @@ -171,12 +171,12 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -249,7 +249,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history = [] txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) if txt.endswith('.pdf'): - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -258,13 +258,13 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] 
if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/agent_fns/auto_agent.py b/crazy_functions/agent_fns/auto_agent.py index 16ca2959..f04cbf85 100644 --- a/crazy_functions/agent_fns/auto_agent.py +++ b/crazy_functions/agent_fns/auto_agent.py @@ -1,5 +1,5 @@ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton +from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom from crazy_functions.agent_fns.general import AutoGenGeneral import time diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py index beb6d7eb..a37f27ae 100644 --- a/crazy_functions/agent_fns/general.py +++ b/crazy_functions/agent_fns/general.py @@ -1,5 +1,5 @@ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton +from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom import time diff --git a/crazy_functions/下载arxiv论文翻译摘要.py b/crazy_functions/下载arxiv论文翻译摘要.py index c711cf45..1e0fe630 100644 --- a/crazy_functions/下载arxiv论文翻译摘要.py +++ b/crazy_functions/下载arxiv论文翻译摘要.py @@ -1,6 +1,6 @@ from toolbox import update_ui, get_log_folder from toolbox import write_history_to_file, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, get_conf +from toolbox import CatchException, report_exception, get_conf import re, requests, unicodedata, os from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive def download_arxiv_(url_pdf): @@ -144,7 +144,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: import bs4 except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -157,7 +157,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: pdf_path, info = download_arxiv_(txt) except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"下载pdf文件未成功") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/crazy_functions/总结word文档.py b/crazy_functions/总结word文档.py index 7c822e9f..b3923071 100644 --- a/crazy_functions/总结word文档.py +++ b/crazy_functions/总结word文档.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -97,7 +97,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr try: from docx import Document except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") yield 
from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -111,7 +111,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -124,7 +124,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/总结音视频.py b/crazy_functions/总结音视频.py index b88775b4..b27bcce0 100644 --- a/crazy_functions/总结音视频.py +++ b/crazy_functions/总结音视频.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, select_api_key, update_ui, get_conf +from toolbox import CatchException, report_exception, select_api_key, update_ui, get_conf from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder @@ -144,7 +144,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro try: from moviepy.editor import AudioFileClip except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -158,7 +158,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -174,7 +174,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量Markdown翻译.py b/crazy_functions/批量Markdown翻译.py index 2bdffc86..12b4ef09 100644 --- a/crazy_functions/批量Markdown翻译.py +++ b/crazy_functions/批量Markdown翻译.py @@ -1,6 +1,6 @@ import glob, time, os, re, logging from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion -from toolbox import CatchException, report_execption, get_log_folder +from toolbox import CatchException, report_exception, get_log_folder from toolbox import write_history_to_file, promote_file_to_downloadzone fast_debug = False @@ -165,7 +165,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -177,12 +177,12 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = 
f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -205,7 +205,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -215,11 +215,11 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') @@ -238,7 +238,7 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -248,11 +248,11 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量总结PDF文档.py b/crazy_functions/批量总结PDF文档.py index 57a6cdf1..7fc3e415 100644 --- a/crazy_functions/批量总结PDF文档.py +++ b/crazy_functions/批量总结PDF文档.py @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from .crazy_utils import read_and_clean_pdf_text @@ -119,7 +119,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst try: import fitz except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -133,7 +133,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst 
project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -142,7 +142,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量总结PDF文档pdfminer.py b/crazy_functions/批量总结PDF文档pdfminer.py index 213d8bb2..a729efaa 100644 --- a/crazy_functions/批量总结PDF文档pdfminer.py +++ b/crazy_functions/批量总结PDF文档pdfminer.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from toolbox import write_history_to_file, promote_file_to_downloadzone @@ -138,7 +138,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo try: import pdfminer, bs4 except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -147,7 +147,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ @@ -155,7 +155,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py index 16dfd6bf..97170d0e 100644 --- a/crazy_functions/批量翻译PDF文档_NOUGAT.py +++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, get_log_folder, gen_time_str +from toolbox import CatchException, report_exception, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -68,7 +68,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import nougat import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr 
tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -84,7 +84,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量翻译PDF文档_多线程.py b/crazy_functions/批量翻译PDF文档_多线程.py index f2e5cf99..333b529b 100644 --- a/crazy_functions/批量翻译PDF文档_多线程.py +++ b/crazy_functions/批量翻译PDF文档_多线程.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, get_log_folder, gen_time_str +from toolbox import CatchException, report_exception, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -26,7 +26,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import tiktoken import scipdf except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -43,7 +43,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/理解PDF文档内容.py b/crazy_functions/理解PDF文档内容.py index 4c0a1052..ef967889 100644 --- a/crazy_functions/理解PDF文档内容.py +++ b/crazy_functions/理解PDF文档内容.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from .crazy_utils import read_and_clean_pdf_text from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -81,7 +81,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat try: import fitz except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -96,7 +96,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -105,7 +105,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/生成函数注释.py b/crazy_functions/生成函数注释.py index bf3da6a4..d71a5680 100644 --- a/crazy_functions/生成函数注释.py +++ b/crazy_functions/生成函数注释.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, 
promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -43,14 +43,14 @@ def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/crazy_functions/解析JupyterNotebook.py b/crazy_functions/解析JupyterNotebook.py index 709b7e1c..eeccadf7 100644 --- a/crazy_functions/解析JupyterNotebook.py +++ b/crazy_functions/解析JupyterNotebook.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone fast_debug = True @@ -131,7 +131,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -141,7 +141,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p file_manifest = [f for f in glob.glob( f'{project_folder}/**/*.ipynb', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py index f17a584d..e319d5a8 100644 --- a/crazy_functions/解析项目源代码.py +++ b/crazy_functions/解析项目源代码.py @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion -from toolbox import CatchException, report_execption, write_history_to_file +from toolbox import CatchException, report_exception, write_history_to_file from .crazy_utils import input_clipping def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): @@ -113,7 +113,7 @@ def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ [f for f in glob.glob('./*/*.py')] project_folder = './' if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -126,12 +126,12 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, 
history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -144,12 +144,12 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") + report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -162,14 +162,14 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -182,7 +182,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ @@ -190,7 +190,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -204,7 +204,7 @@ def 解析一个Java项目(txt, 
llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ @@ -212,7 +212,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -226,7 +226,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \ @@ -241,7 +241,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -255,7 +255,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \ @@ -263,7 +263,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -276,14 +276,14 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f 
in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -296,7 +296,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \ @@ -304,7 +304,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -318,13 +318,13 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -352,7 +352,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return # 若上传压缩文件, 先寻找到解压的文件夹路径, 从而避免解析压缩文件 @@ -365,7 +365,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \ os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' 
+ re.search(pattern_except, f).group().split('.')[-1]))] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) \ No newline at end of file diff --git a/crazy_functions/读文章写摘要.py b/crazy_functions/读文章写摘要.py index acdf632c..a43b6aa2 100644 --- a/crazy_functions/读文章写摘要.py +++ b/crazy_functions/读文章写摘要.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -51,14 +51,14 @@ def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \ # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/crazy_functions/谷歌检索小助手.py b/crazy_functions/谷歌检索小助手.py index 5924a286..14b21bfc 100644 --- a/crazy_functions/谷歌检索小助手.py +++ b/crazy_functions/谷歌检索小助手.py @@ -1,5 +1,5 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from toolbox import CatchException, report_execption, promote_file_to_downloadzone +from toolbox import CatchException, report_exception, promote_file_to_downloadzone from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file import logging import requests @@ -29,7 +29,7 @@ def get_meta_information(url, chatbot, history): try: session.proxies.update(proxies) except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"获取代理失败 无代理状态下很可能无法访问OpenAI家族的模型及谷歌学术 建议:检查USE_PROXY选项是否修改。", b=f"尝试直接连接") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -146,7 +146,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import math from bs4 import BeautifulSoup except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/docs/self_analysis.md b/docs/self_analysis.md index c3736193..0b76c7bd 100644 --- a/docs/self_analysis.md +++ b/docs/self_analysis.md @@ -217,7 +217,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 ## [31/48] 请对下面的程序文件做一个概述: crazy_functions\读文章写摘要.py 
-这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_execption、write_results_to_file等。 +这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_exception、write_results_to_file等。 ## [32/48] 请对下面的程序文件做一个概述: crazy_functions\谷歌检索小助手.py diff --git a/toolbox.py b/toolbox.py index b1e1ce7b..a5425c08 100644 --- a/toolbox.py +++ b/toolbox.py @@ -187,7 +187,7 @@ def HotReload(f): 其他小工具: - write_history_to_file: 将结果写入markdown文件中 - regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。 - - report_execption: 向chatbot中添加简单的意外错误信息 + - report_exception: 向chatbot中添加简单的意外错误信息 - text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。 - markdown_convertion: 用多种方式组合,将markdown转化为好看的html - format_io: 接管gradio默认的markdown处理方式 @@ -260,7 +260,7 @@ def regular_txt_to_markdown(text): -def report_execption(chatbot, history, a, b): +def report_exception(chatbot, history, a, b): """ 向chatbot中添加错误信息 """ From f75e39dc2734c62d7590e137c37c8504fa0eedbb Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 21:11:55 +0800 Subject: [PATCH 31/31] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=9C=AC=E5=9C=B0?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E5=9C=A8Windows=E4=B8=8B=E7=9A=84=E5=8A=A0?= =?UTF-8?q?=E8=BD=BDBUG?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_chatgpt.py | 3 +-- request_llms/bridge_chatgpt_website.py | 3 +-- request_llms/bridge_claude.py | 2 +- request_llms/bridge_internlm.py | 17 +++++++++-------- request_llms/bridge_qwen.py | 15 ++++++++------- request_llms/local_llm_class.py | 2 +- tests/test_llms.py | 4 ++-- version | 4 ++-- 8 files changed, 25 insertions(+), 25 deletions(-) diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py index 292de0ad..e55ad37a 100644 --- a/request_llms/bridge_chatgpt.py +++ b/request_llms/bridge_chatgpt.py @@ -7,8 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. predict_no_ui_long_connection:支持多线程 """ import json diff --git a/request_llms/bridge_chatgpt_website.py b/request_llms/bridge_chatgpt_website.py index 7f3147b1..f2f07090 100644 --- a/request_llms/bridge_chatgpt_website.py +++ b/request_llms/bridge_chatgpt_website.py @@ -7,8 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. predict_no_ui_long_connection:支持多线程 """ import json diff --git a/request_llms/bridge_claude.py b/request_llms/bridge_claude.py index 6084b1f1..42b75052 100644 --- a/request_llms/bridge_claude.py +++ b/request_llms/bridge_claude.py @@ -7,7 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. 
predict_no_ui_long_connection:支持多线程 """ import os diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index b831dc59..20b53b44 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -5,7 +5,7 @@ from transformers import AutoModel, AutoTokenizer import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns @@ -52,14 +52,15 @@ class GetInternlmHandle(LocalLLMHandle): import torch from transformers import AutoModelForCausalLM, AutoTokenizer device = get_conf('LOCAL_MODEL_DEVICE') - if self._model is None: - tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) - if device=='cpu': - model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16) - else: - model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda() + with ProxyNetworkActivate('Download_LLM'): + if self._model is None: + tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) + if device=='cpu': + model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16) + else: + model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda() - model = model.eval() + model = model.eval() return model, tokenizer def llm_stream_generator(self, **kwargs): diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index 0b226df7..afd886bf 100644 --- a/request_llms/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -6,7 +6,7 @@ from transformers import AutoModel, AutoTokenizer import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns @@ -29,12 +29,13 @@ class GetONNXGLMHandle(LocalLLMHandle): import platform from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig - model_id = 'qwen/Qwen-7B-Chat' - self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True) - # use fp16 - model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() - model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 - self._model = model + with ProxyNetworkActivate('Download_LLM'): + model_id = 'qwen/Qwen-7B-Chat' + self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True) + # use fp16 + model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() + model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 + self._model = model return self._model, self._tokenizer diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index fe6be961..38fcfc91 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -201,7 +201,7 @@ class LocalLLMHandle(Process): if res.startswith(self.std_tag): new_output = 
res[len(self.std_tag):] std_out = std_out[:std_out_clip_len] - # print(new_output, end='') + print(new_output, end='') std_out = new_output + std_out yield self.std_tag + '\n```\n' + std_out + '\n```\n' elif res == '[Finish]': diff --git a/tests/test_llms.py b/tests/test_llms.py index 5c5d2f6c..6285f030 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -15,11 +15,11 @@ if __name__ == "__main__": # from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection # from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection # from request_llms.bridge_claude import predict_no_ui_long_connection - # from request_llms.bridge_internlm import predict_no_ui_long_connection + from request_llms.bridge_internlm import predict_no_ui_long_connection # from request_llms.bridge_qwen import predict_no_ui_long_connection # from request_llms.bridge_spark import predict_no_ui_long_connection # from request_llms.bridge_zhipu import predict_no_ui_long_connection - from request_llms.bridge_chatglm3 import predict_no_ui_long_connection + # from request_llms.bridge_chatglm3 import predict_no_ui_long_connection llm_kwargs = { 'max_length': 4096, diff --git a/version b/version index 5e4fb7d0..69a871e0 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.57, + "version": 3.58, "show_feature": true, - "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" + "new_feature": "修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" }
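
Editor's note on the API2D auto-alignment loop (PATCH 27): in the added `for model in AVAIL_LLM_MODELS` block, `mi` is bound to the *same* dict object as the base model's `model_info` entry, so `mi.update({"endpoint": api2d_endpoint})` also rewrites the base model's endpoint in place. Below is a minimal, self-contained sketch of the aliasing and a copy-based fix; the endpoint URLs and the trimmed-down `model_info` are illustrative placeholders, not the project's full table.

```python
# Sketch only: a one-entry model_info standing in for the real table in
# request_llms/bridge_all.py. URLs are placeholders for illustration.
openai_endpoint = "https://api.openai.com/v1/chat/completions"
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"

model_info = {"gpt-4": {"endpoint": openai_endpoint, "max_token": 8192}}
AVAIL_LLM_MODELS = ["gpt-4", "api2d-gpt-4"]

for model in AVAIL_LLM_MODELS:
    base = model.replace('api2d-', '')
    if model.startswith('api2d-') and base in model_info:
        mi = dict(model_info[base])      # shallow copy breaks the alias;
        mi["endpoint"] = api2d_endpoint  # without it, "gpt-4" itself would be
        model_info[model] = mi           # redirected to the api2d endpoint

assert model_info["gpt-4"]["endpoint"] == openai_endpoint
assert model_info["api2d-gpt-4"]["endpoint"] == api2d_endpoint
```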
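
Editor's note on the singleton refactor (PATCH 28): the commit drops the `@SingletonLocalLLM` class decorator from every local-model bridge and instead wraps the class at the call sites (`_llm_handle = SingletonLocalLLM(LLMSingletonClass)()`). The wrapper's definition is not part of this diff; the following is a hypothetical reconstruction, under the assumption that it caches one handle instance per wrapped class.

```python
# Hypothetical sketch of SingletonLocalLLM (the real definition lives in
# request_llms/local_llm_class.py and is not shown in this patch series).
_handles = {}

def SingletonLocalLLM(cls):
    """Return a zero-argument factory that instantiates `cls` at most once."""
    def factory(*args, **kwargs):
        if cls not in _handles:
            _handles[cls] = cls(*args, **kwargs)
        return _handles[cls]
    return factory
```

Wrapping at the call site instead of decorating keeps `GetGLM2Handle` and friends bound to plain classes at module level. That plausibly matters for the Windows fix in the same commit: under the `spawn` start method, `Process` subclasses are pickled by looking up the class by its module-level name, which fails if the decorator has replaced that name with a factory function.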
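
Related note on the same fix: PATCH 28 also moves `self.child.write = lambda x: ...` out of `__init__` and into `run()`, and adds a `flush` stub. A plausible reading: on Windows, multiprocessing uses the `spawn` start method, `Process.start()` pickles the process state for the child, and lambdas are not picklable; assigning them inside `run()` happens on the child side, after unpickling. The `flush` stub completes the file-like interface that `print` and tqdm expect once stdout is redirected to the pipe. A standalone repro sketch (not project code):

```python
import multiprocessing as mp

class Worker(mp.Process):
    def __init__(self):
        super().__init__()
        # self.greet = lambda: print("hi")  # under spawn, this assignment makes
        #                                   # start() fail: lambdas can't be pickled
    def run(self):
        self.greet = lambda: print("hi")    # safe: set in the child, never pickled
        self.greet()

if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)  # Windows default; forced here for repro
    p = Worker()
    p.start()
    p.join()
```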