diff --git a/check_proxy.py b/check_proxy.py
index 3aad33c1..b5ee17d3 100644
--- a/check_proxy.py
+++ b/check_proxy.py
@@ -1,3 +1,4 @@
+from loguru import logger
 def check_proxy(proxies, return_ip=False):
     import requests
@@ -19,14 +20,14 @@ def check_proxy(proxies, return_ip=False):
         else:
             result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}"
         if not return_ip:
-            print(result)
+            logger.warning(result)
             return result
         else:
             return ip
     except:
         result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
         if not return_ip:
-            print(result)
+            logger.warning(result)
             return result
         else:
             return ip
@@ -82,25 +83,25 @@ def patch_and_restart(path):
     import sys
     import time
     import glob
-    from shared_utils.colorful import print亮黄, print亮绿, print亮红
+    from shared_utils.colorful import log亮黄, log亮绿, log亮红
     # if not using config_private, move origin config.py as config_private.py
     if not os.path.exists('config_private.py'):
-        print亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,',
+        log亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,',
               '另外您可以随时在history子文件夹下找回旧版的程序。')
         shutil.copyfile('config.py', 'config_private.py')
     path_new_version = glob.glob(path + '/*-master')[0]
     dir_util.copy_tree(path_new_version, './')
-    print亮绿('代码已经更新,即将更新pip包依赖……')
-    for i in reversed(range(5)): time.sleep(1); print(i)
+    log亮绿('代码已经更新,即将更新pip包依赖……')
+    for i in reversed(range(5)): time.sleep(1); log亮绿(i)
     try:
         import subprocess
         subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
     except:
-        print亮红('pip包依赖安装出现问题,需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
-    print亮绿('更新完成,您可以随时在history子文件夹下找回旧版的程序,5s之后重启')
-    print亮红('假如重启失败,您可能需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
-    print(' ------------------------------ -----------------------------------')
-    for i in reversed(range(8)): time.sleep(1); print(i)
+        log亮红('pip包依赖安装出现问题,需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
+    log亮绿('更新完成,您可以随时在history子文件夹下找回旧版的程序,5s之后重启')
+    log亮红('假如重启失败,您可能需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
+    log亮绿(' ------------------------------ -----------------------------------')
+    for i in reversed(range(8)): time.sleep(1); log亮绿(i)
     os.execl(sys.executable, sys.executable, *sys.argv)
@@ -135,9 +136,9 @@ def auto_update(raise_error=False):
             current_version = f.read()
         current_version = json.loads(current_version)['version']
         if (remote_version - current_version) >= 0.01-1e-5:
-            from shared_utils.colorful import print亮黄
-            print亮黄(f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
-            print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
+            from shared_utils.colorful import log亮黄
+            log亮黄(f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
+            logger.info('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
             user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?')
             if user_instruction in ['Y', 'y']:
                 path = backup_and_download(current_version, remote_version)
@@ -148,9 +149,9 @@ def auto_update(raise_error=False):
             if raise_error:
                 from toolbox import trimmed_format_exc
                 msg += trimmed_format_exc()
-            print(msg)
+            logger.warning(msg)
         else:
-            print('自动更新程序:已禁用')
+            logger.info('自动更新程序:已禁用')
             return
     else:
         return
@@ -159,10 +160,10 @@ def auto_update(raise_error=False):
         if raise_error:
             from toolbox import trimmed_format_exc
             msg += trimmed_format_exc()
-        print(msg)
+        logger.info(msg)

 def warm_up_modules():
-    print('正在执行一些模块的预热 ...')
+    logger.info('正在执行一些模块的预热 ...')
     from toolbox import ProxyNetworkActivate
     from request_llms.bridge_all import model_info
     with ProxyNetworkActivate("Warmup_Modules"):
@@ -172,7 +173,7 @@ def warm_up_modules():
         enc.encode("模块预热", disallowed_special=())

 def warm_up_vectordb():
-    print('正在执行一些模块的预热 ...')
+    logger.info('正在执行一些模块的预热 ...')
     from toolbox import ProxyNetworkActivate
     with ProxyNetworkActivate("Warmup_Modules"):
         import nltk
diff --git a/crazy_functional.py b/crazy_functional.py
index 31bb17c2..1ddd8c20 100644
--- a/crazy_functional.py
+++ b/crazy_functional.py
@@ -1,5 +1,6 @@
 from toolbox import HotReload  # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效
 from toolbox import trimmed_format_exc
+from loguru import logger


 def get_crazy_functions():
@@ -429,8 +430,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     # try:
     #     from crazy_functions.联网的ChatGPT import 连接网络回答问题
@@ -460,8 +461,8 @@ def get_crazy_functions():
     #         }
     #     )
     # except:
-    #     print(trimmed_format_exc())
-    #     print("Load function plugin failed")
+    #     logger.error(trimmed_format_exc())
+    #     logger.error("Load function plugin failed")

     try:
         from crazy_functions.SourceCode_Analyse import 解析任意code项目
@@ -479,8 +480,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
@@ -498,8 +499,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

@@ -520,8 +521,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.数学动画生成manim import 动画生成
@@ -538,8 +539,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.Markdown_Translate import Markdown翻译指定语言
@@ -557,8 +558,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.知识库问答 import 知识库文件注入
@@ -576,8 +577,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.知识库问答 import 读取知识库作答
@@ -595,8 +596,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.交互功能函数模板 import 交互功能模板函数
@@ -612,8 +613,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
@@ -635,8 +636,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.批量翻译PDF文档_NOUGAT import 批量翻译PDF文档
@@ -652,8 +653,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.函数动态生成 import 函数动态生成
@@ -669,8 +670,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.多智能体 import 多智能体终端
@@ -686,8 +687,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     try:
         from crazy_functions.互动小游戏 import 随机小游戏
@@ -703,8 +704,8 @@ def get_crazy_functions():
             }
         )
     except:
-        print(trimmed_format_exc())
-        print("Load function plugin failed")
+        logger.error(trimmed_format_exc())
+        logger.error("Load function plugin failed")

     # try:
     #     from crazy_functions.高级功能函数模板 import 测试图表渲染
@@ -717,7 +718,7 @@ def get_crazy_functions():
     #         }
    #     })
     # except:
-    #     print(trimmed_format_exc())
+    #     logger.error(trimmed_format_exc())
    #     print('Load function plugin failed')

     # try:
diff --git a/crazy_functions/Conversation_To_File.py b/crazy_functions/Conversation_To_File.py
index 24f94b36..b8408748 100644
--- a/crazy_functions/Conversation_To_File.py
+++ b/crazy_functions/Conversation_To_File.py
@@ -171,7 +171,7 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     system_prompt 给gpt的静默提醒
     user_request 当前用户的请求信息(IP地址等)
     """
-    from .crazy_utils import get_files_from_everything
+    from crazy_functions.crazy_utils import get_files_from_everything
     success, file_manifest, _ = get_files_from_everything(txt, type='.html')

     if not success:
diff --git a/crazy_functions/Image_Generate.py b/crazy_functions/Image_Generate.py
index 70beec81..d4d0a612 100644
--- a/crazy_functions/Image_Generate.py
+++ b/crazy_functions/Image_Generate.py
@@ -30,7 +30,7 @@ def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", qual
     if style is not None:
         data['style'] = style
     response = requests.post(url, headers=headers, json=data, proxies=proxies)
-    print(response.content)
+    # logger.info(response.content)
     try:
         image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
     except:
@@ -76,7 +76,7 @@ def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="da
     }
     response = requests.post(url, headers=headers, files=files, proxies=proxies)
-    print(response.content)
+    # logger.info(response.content)
     try:
         image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
     except:
diff --git a/crazy_functions/Latex_Function.py b/crazy_functions/Latex_Function.py
index ba03cf12..af020775 100644
--- a/crazy_functions/Latex_Function.py
+++ b/crazy_functions/Latex_Function.py
@@ -1,6 +1,8 @@
 from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone, check_repeat_upload, map_file_to_sha256
 from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
 from functools import partial
+from loguru import logger
+
 import glob, os, requests, time, json, tarfile

 pj = os.path.join
@@ -178,7 +180,7 @@ def pdf2tex_project(pdf_file_path, plugin_kwargs):

     if response.ok:
         pdf_id = response.json()["pdf_id"]
-        print(f"PDF processing initiated. PDF ID: {pdf_id}")
+        logger.info(f"PDF processing initiated. PDF ID: {pdf_id}")

         # Step 2: Check processing status
         while True:
@@ -186,12 +188,12 @@ def pdf2tex_project(pdf_file_path, plugin_kwargs):
             conversion_data = conversion_response.json()

             if conversion_data["status"] == "completed":
-                print("PDF processing completed.")
+                logger.info("PDF processing completed.")
                 break
             elif conversion_data["status"] == "error":
-                print("Error occurred during processing.")
+                logger.info("Error occurred during processing.")
             else:
-                print(f"Processing status: {conversion_data['status']}")
+                logger.info(f"Processing status: {conversion_data['status']}")
             time.sleep(5)  # wait for a few seconds before checking again

         # Step 3: Save results to local files
@@ -206,7 +208,7 @@ def pdf2tex_project(pdf_file_path, plugin_kwargs):
             output_path = os.path.join(output_dir, output_name)
             with open(output_path, "wb") as output_file:
                 output_file.write(response.content)
-            print(f"tex.zip file saved at: {output_path}")
+            logger.info(f"tex.zip file saved at: {output_path}")

             import zipfile
             unzip_dir = os.path.join(output_dir, file_name_wo_dot)
@@ -216,7 +218,7 @@ def pdf2tex_project(pdf_file_path, plugin_kwargs):

             return unzip_dir
         else:
-            print(f"Error sending PDF for processing. Status code: {response.status_code}")
+            logger.error(f"Error sending PDF for processing. Status code: {response.status_code}")
             return None
     else:
         from crazy_functions.pdf_fns.parse_pdf_via_doc2x import 解析PDF_DOC2X_转Latex
diff --git a/crazy_functions/Latex全文润色.py b/crazy_functions/Latex全文润色.py
index 960571fd..875e5ad4 100644
--- a/crazy_functions/Latex全文润色.py
+++ b/crazy_functions/Latex全文润色.py
@@ -1,6 +1,6 @@
 from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder
 from toolbox import CatchException, report_exception, write_history_to_file, zip_folder
-
+from loguru import logger

 class PaperFileGroup():
     def __init__(self):
@@ -33,7 +33,7 @@ class PaperFileGroup():
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")

-        print('Segmentation: done')
+        logger.info('Segmentation: done')
     def merge_result(self):
         self.file_result = ["" for _ in range(len(self.file_paths))]
         for r, k in zip(self.sp_file_result, self.sp_file_index):
@@ -56,7 +56,7 @@ class PaperFileGroup():

 def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='polish'):
     import time, os, re
-    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
+    from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency


     # <-------- 读取Latex文件,删除其中的所有注释 ---------->
@@ -122,7 +122,7 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
             pfg.write_result()
             pfg.zip_result()
         except:
-            print(trimmed_format_exc())
+            logger.error(trimmed_format_exc())

     # <-------- 整理结果,退出 ---------->
     create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
diff --git a/crazy_functions/Latex全文翻译.py b/crazy_functions/Latex全文翻译.py
index a0802fd5..47824860 100644
--- a/crazy_functions/Latex全文翻译.py
+++ b/crazy_functions/Latex全文翻译.py
@@ -1,6 +1,6 @@
 from toolbox import update_ui, promote_file_to_downloadzone
 from toolbox import CatchException, report_exception, write_history_to_file
-fast_debug = False
+from loguru import logger

 class PaperFileGroup():
     def __init__(self):
@@ -33,11 +33,11 @@ class PaperFileGroup():
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")

-        print('Segmentation: done')
+        logger.info('Segmentation: done')

 def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
     import time, os, re
-    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
+    from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

     # <-------- 读取Latex文件,删除其中的所有注释 ---------->
     pfg = PaperFileGroup()
diff --git a/crazy_functions/Markdown_Translate.py b/crazy_functions/Markdown_Translate.py
index 59f26fee..858d13da 100644
--- a/crazy_functions/Markdown_Translate.py
+++ b/crazy_functions/Markdown_Translate.py
@@ -1,4 +1,5 @@
-import glob, shutil, os, re, logging
+import glob, shutil, os, re
+from loguru import logger
 from toolbox import update_ui, trimmed_format_exc, gen_time_str
 from toolbox import CatchException, report_exception, get_log_folder
 from toolbox import write_history_to_file, promote_file_to_downloadzone
@@ -34,7 +35,7 @@ class PaperFileGroup():
                 self.sp_file_contents.append(segment)
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md")
-        logging.info('Segmentation: done')
+        logger.info('Segmentation: done')

     def merge_result(self):
         self.file_result = ["" for _ in range(len(self.file_paths))]
@@ -51,7 +52,7 @@ class PaperFileGroup():
         return manifest

 def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
-    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
+    from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

     # <-------- 读取Markdown文件,删除其中的所有注释 ---------->
     pfg = PaperFileGroup()
@@ -106,7 +107,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
             expected_f_name = plugin_kwargs['markdown_expected_output_path']
             shutil.copyfile(output_file, expected_f_name)
         except:
-            logging.error(trimmed_format_exc())
+            logger.error(trimmed_format_exc())

     # <-------- 整理结果,退出 ---------->
     create_report_file_name = gen_time_str() + f"-chatgpt.md"
@@ -126,7 +127,7 @@ def get_files_from_everything(txt, preference=''):
     proxies = get_conf('proxies')
     # 网络的远程文件
     if preference == 'Github':
-        logging.info('正在从github下载资源 ...')
+        logger.info('正在从github下载资源 ...')
         if not txt.endswith('.md'):
             # Make a request to the GitHub API to retrieve the repository information
             url = txt.replace("https://github.com/", "https://api.github.com/repos/") + '/readme'
diff --git a/crazy_functions/SourceCode_Analyse.py b/crazy_functions/SourceCode_Analyse.py
index ea071ed3..9f2c342f 100644
--- a/crazy_functions/SourceCode_Analyse.py
+++ b/crazy_functions/SourceCode_Analyse.py
@@ -5,8 +5,8 @@ from crazy_functions.crazy_utils import input_clipping

 def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
     import os, copy
-    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-    from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+    from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
+    from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
     summary_batch_isolation = True
     inputs_array = []
diff --git a/crazy_functions/SourceCode_Comment.py b/crazy_functions/SourceCode_Comment.py
index e61f14d5..20390800 100644
--- a/crazy_functions/SourceCode_Comment.py
+++ b/crazy_functions/SourceCode_Comment.py
@@ -73,7 +73,7 @@ def 注释源代码(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
             file_tree_struct.manifest[fp].compare_html = compare_html_path
             with open(compare_html_path, 'w', encoding='utf-8') as f:
                 f.write(html_template)
-            print('done 1')
+            # print('done 1')
     chatbot.append([None, f"正在处理:"])
     futures = []
diff --git a/crazy_functions/agent_fns/echo_agent.py b/crazy_functions/agent_fns/echo_agent.py
index 52bf72de..1c691cb7 100644
--- a/crazy_functions/agent_fns/echo_agent.py
+++ b/crazy_functions/agent_fns/echo_agent.py
@@ -1,4 +1,5 @@
 from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
+from loguru import logger

 class EchoDemo(PluginMultiprocessManager):
     def subprocess_worker(self, child_conn):
@@ -16,4 +17,4 @@ class EchoDemo(PluginMultiprocessManager):
                 elif msg.cmd == "terminate":
                     self.child_conn.send(PipeCom("done", ""))
                     break
-        print('[debug] subprocess_worker terminated')
\ No newline at end of file
+        logger.info('[debug] subprocess_worker terminated')
\ No newline at end of file
diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py
index 128507c3..db8e83d5 100644
--- a/crazy_functions/agent_fns/pipe.py
+++ b/crazy_functions/agent_fns/pipe.py
@@ -1,5 +1,6 @@
 from toolbox import get_log_folder, update_ui, gen_time_str, get_conf, promote_file_to_downloadzone
 from crazy_functions.agent_fns.watchdog import WatchDog
+from loguru import logger
 import time, os

 class PipeCom:
@@ -47,7 +48,7 @@ class PluginMultiprocessManager:
     def terminate(self):
         self.p.terminate()
         self.alive = False
-        print("[debug] instance terminated")
+        logger.info("[debug] instance terminated")

     def subprocess_worker(self, child_conn):
         # ⭐⭐ run in subprocess
diff --git a/crazy_functions/agent_fns/python_comment_agent.py b/crazy_functions/agent_fns/python_comment_agent.py
index 66fa5c38..dd4b6ce8 100644
--- a/crazy_functions/agent_fns/python_comment_agent.py
+++ b/crazy_functions/agent_fns/python_comment_agent.py
@@ -1,10 +1,12 @@
-from toolbox import CatchException, update_ui
-from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from request_llms.bridge_all import predict_no_ui_long_connection
 import datetime
 import re
 import os
+from loguru import logger
 from textwrap import dedent
+from toolbox import CatchException, update_ui
+from request_llms.bridge_all import predict_no_ui_long_connection
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+

 # TODO: 解决缩进问题
 find_function_end_prompt = '''
@@ -355,7 +357,7 @@ class PythonCodeComment():
             try:
                 successful, hint = self.verify_successful(next_batch, result)
             except Exception as e:
-                print('ignored exception:\n' + str(e))
+                logger.error('ignored exception:\n' + str(e))
                 break
             if successful:
                 break
diff --git a/crazy_functions/agent_fns/watchdog.py b/crazy_functions/agent_fns/watchdog.py
index 7cd14d23..bb9a99d1 100644
--- a/crazy_functions/agent_fns/watchdog.py
+++ b/crazy_functions/agent_fns/watchdog.py
@@ -1,4 +1,5 @@
 import threading, time
+from loguru import logger

 class WatchDog():
     def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
@@ -13,7 +14,7 @@ class WatchDog():
         while True:
             if self.kill_dog: break
             if time.time() - self.last_feed > self.timeout:
-                if len(self.msg) > 0: print(self.msg)
+                if len(self.msg) > 0: logger.info(self.msg)
                 self.bark_fn()
                 break
             time.sleep(self.interval)
diff --git a/crazy_functions/chatglm微调工具.py b/crazy_functions/chatglm微调工具.py
index 8405fc55..fc5f33da 100644
--- a/crazy_functions/chatglm微调工具.py
+++ b/crazy_functions/chatglm微调工具.py
@@ -1,5 +1,5 @@
 from toolbox import CatchException, update_ui, promote_file_to_downloadzone
-from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
+from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
 import datetime, json

 def fetch_items(list_of_items, batch_size):
diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py
index 710fd7f1..5751956f 100644
--- a/crazy_functions/crazy_utils.py
+++ b/crazy_functions/crazy_utils.py
@@ -1,8 +1,8 @@
-from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton
-from shared_utils.char_visual_effect import scolling_visual_effect
-import threading
 import os
-import logging
+import threading
+from loguru import logger
+from shared_utils.char_visual_effect import scolling_visual_effect
+from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton

 def input_clipping(inputs, history, max_token_limit, return_clip_flags=False):
     """
@@ -133,7 +133,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
             except:
                 # 【第三种情况】:其他错误:重试几次
                 tb_str = '```\n' + trimmed_format_exc() + '```'
-                print(tb_str)
+                logger.error(tb_str)
                 mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                 if retry_op > 0:
                     retry_op -= 1
@@ -283,7 +283,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                 # 【第三种情况】:其他错误
                 if detect_timeout(): raise RuntimeError("检测到程序终止。")
                 tb_str = '```\n' + trimmed_format_exc() + '```'
-                print(tb_str)
+                logger.error(tb_str)
                 gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                 if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
                 if retry_op > 0:
@@ -378,7 +378,7 @@ def read_and_clean_pdf_text(fp):
     import fitz, copy
     import re
     import numpy as np
-    from shared_utils.colorful import print亮黄, print亮绿
+    # from shared_utils.colorful import print亮黄, print亮绿
     fc = 0  # Index 0 文本
     fs = 1  # Index 1 字体
     fb = 2  # Index 2 框框
@@ -595,7 +595,7 @@ class nougat_interface():
     def nougat_with_timeout(self, command, cwd, timeout=3600):
         import subprocess
         from toolbox import ProxyNetworkActivate
-        logging.info(f'正在执行命令 {command}')
+        logger.info(f'正在执行命令 {command}')
         with ProxyNetworkActivate("Nougat_Download"):
             process = subprocess.Popen(command, shell=False, cwd=cwd, env=os.environ)
         try:
@@ -603,7 +603,7 @@ class nougat_interface():
         except subprocess.TimeoutExpired:
             process.kill()
             stdout, stderr = process.communicate()
-            print("Process timed out!")
+            logger.error("Process timed out!")
             return False
         return True
diff --git a/crazy_functions/diagram_fns/file_tree.py b/crazy_functions/diagram_fns/file_tree.py
index e1800ee9..0d1c8310 100644
--- a/crazy_functions/diagram_fns/file_tree.py
+++ b/crazy_functions/diagram_fns/file_tree.py
@@ -1,5 +1,6 @@
 import os
 from textwrap import indent
+from loguru import logger

 class FileNode:
     def __init__(self, name, build_manifest=False):
@@ -60,7 +61,7 @@ class FileNode:
         current_node.children.append(term)

     def print_files_recursively(self, level=0, code="R0"):
-        print(' '*level + self.name + ' ' + str(self.is_leaf) + ' ' + str(self.level))
+        logger.info(' '*level + self.name + ' ' + str(self.is_leaf) + ' ' + str(self.level))
         for j, child in enumerate(self.children):
             child.print_files_recursively(level=level+1, code=code+str(j))
             self.parenting_ship.extend(child.parenting_ship)
@@ -123,4 +124,4 @@ if __name__ == "__main__":
         "用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器",
         "包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类",
     ]
-    print(build_file_tree_mermaid_diagram(file_manifest, file_comments, "项目文件树"))
\ No newline at end of file
+    logger.info(build_file_tree_mermaid_diagram(file_manifest, file_comments, "项目文件树"))
\ No newline at end of file
diff --git a/crazy_functions/json_fns/pydantic_io.py b/crazy_functions/json_fns/pydantic_io.py
index d7093359..bd78cefe 100644
--- a/crazy_functions/json_fns/pydantic_io.py
+++ b/crazy_functions/json_fns/pydantic_io.py
@@ -24,8 +24,8 @@ class Actor(BaseModel):
     film_names: List[str] = Field(description="list of names of films they starred in")
 """

-import json, re, logging
-
+import json, re
+from loguru import logger as logging

 PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
diff --git a/crazy_functions/latex_fns/latex_actions.py b/crazy_functions/latex_fns/latex_actions.py
index 0ec01b99..4293f0d0 100644
--- a/crazy_functions/latex_fns/latex_actions.py
+++ b/crazy_functions/latex_fns/latex_actions.py
@@ -1,15 +1,17 @@
+import os
+import re
+import shutil
+import numpy as np
+from loguru import logger
 from toolbox import update_ui, update_ui_lastest_msg, get_log_folder
 from toolbox import get_conf, promote_file_to_downloadzone
-from .latex_toolbox import PRESERVE, TRANSFORM
-from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
-from .latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process
-from .latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout
-from .latex_toolbox import find_title_and_abs
-from .latex_pickle_io import objdump, objload
+from crazy_functions.latex_fns.latex_toolbox import PRESERVE, TRANSFORM
+from crazy_functions.latex_fns.latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
+from crazy_functions.latex_fns.latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process
+from crazy_functions.latex_fns.latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout
+from crazy_functions.latex_fns.latex_toolbox import find_title_and_abs
+from crazy_functions.latex_fns.latex_pickle_io import objdump, objload

-import os, shutil
-import re
-import numpy as np
 pj = os.path.join
@@ -323,7 +325,7 @@ def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work
         buggy_lines = [int(l) for l in buggy_lines]
         buggy_lines = sorted(buggy_lines)
         buggy_line = buggy_lines[0]-1
-        print("reversing tex line that has errors", buggy_line)
+        logger.warning(f"reversing tex line that has errors: {buggy_line}")

         # 重组,逆转出错的段落
         if buggy_line not in fixed_line:
@@ -337,7 +339,7 @@ def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work

         return True, f"{tex_name_pure}_fix_{n_fix}", buggy_lines
     except:
-        print("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.")
+        logger.error("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.")
        return False, -1, [-1]
@@ -380,7 +382,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
         if mode!='translate_zh':
             yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面
-            print( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
+            logger.info( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
             ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex', os.getcwd())

         yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面
@@ -419,7 +421,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
                     shutil.copyfile(concat_pdf, pj(work_folder, '..', 'translation', 'comparison.pdf'))
                 promote_file_to_downloadzone(concat_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
             except Exception as e:
-                print(e)
+                logger.error(e)
                 pass
             return True # 成功啦
         else:
@@ -465,4 +467,4 @@ def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
         promote_file_to_downloadzone(file=res, chatbot=chatbot)
     except:
         from toolbox import trimmed_format_exc
-        print('writing html result failed:', trimmed_format_exc())
+        logger.error('writing html result failed: ' + trimmed_format_exc())
diff --git a/crazy_functions/latex_fns/latex_toolbox.py b/crazy_functions/latex_fns/latex_toolbox.py
index bbd1bb3c..81e191ab 100644
--- a/crazy_functions/latex_fns/latex_toolbox.py
+++ b/crazy_functions/latex_fns/latex_toolbox.py
@@ -1,6 +1,8 @@
-import os, shutil
+import os
 import re
+import shutil
 import numpy as np
+from loguru import logger

 PRESERVE = 0
 TRANSFORM = 1
@@ -55,7 +57,7 @@ def post_process(root):
                 str_stack.append("{")
             elif c == "}":
                 if len(str_stack) == 1:
-                    print("stack fix")
+                    logger.warning("fixing brace error")
                     return i
                 str_stack.pop(-1)
             else:
@@ -601,7 +603,7 @@ def compile_latex_with_timeout(command, cwd, timeout=60):
     except subprocess.TimeoutExpired:
         process.kill()
         stdout, stderr = process.communicate()
-        print("Process timed out!")
+        logger.error("Process timed out (compile_latex_with_timeout)!")
         return False
     return True
diff --git a/crazy_functions/live_audio/aliyunASR.py b/crazy_functions/live_audio/aliyunASR.py
index 3a523287..c1d646dc 100644
--- a/crazy_functions/live_audio/aliyunASR.py
+++ b/crazy_functions/live_audio/aliyunASR.py
@@ -1,5 +1,6 @@
-import time, logging, json, sys, struct
+import time, json, sys, struct
 import numpy as np
+from loguru import logger as logging
 from scipy.io.wavfile import WAVE_FORMAT

 def write_numpy_to_wave(filename, rate, data, add_header=False):
@@ -106,18 +107,14 @@ def is_speaker_speaking(vad, data, sample_rate):

 class AliyunASR():
     def test_on_sentence_begin(self, message, *args):
-        # print("test_on_sentence_begin:{}".format(message))
         pass

     def test_on_sentence_end(self, message, *args):
-        # print("test_on_sentence_end:{}".format(message))
         message = json.loads(message)
         self.parsed_sentence = message['payload']['result']
         self.event_on_entence_end.set()
-        # print(self.parsed_sentence)

     def test_on_start(self, message, *args):
-        # print("test_on_start:{}".format(message))
         pass

     def test_on_error(self, message, *args):
@@ -129,13 +126,11 @@ class AliyunASR():
         pass

     def test_on_result_chg(self, message, *args):
-        # print("test_on_chg:{}".format(message))
         message = json.loads(message)
         self.parsed_text = message['payload']['result']
         self.event_on_result_chg.set()

     def test_on_completed(self, message, *args):
-        # print("on_completed:args=>{} message=>{}".format(args, message))
         pass

     def audio_convertion_thread(self, uuid):
@@ -248,14 +243,14 @@ class AliyunASR():

         try:
             response = client.do_action_with_exception(request)
-            print(response)
+            logging.info(response)
             jss = json.loads(response)
             if 'Token' in jss and 'Id' in jss['Token']:
                 token = jss['Token']['Id']
                 expireTime = jss['Token']['ExpireTime']
-                print("token = " + token)
-                print("expireTime = " + str(expireTime))
+                logging.info("token = " + token)
+                logging.info("expireTime = " + str(expireTime))
         except Exception as e:
-            print(e)
+            logging.error(e)

         return token
diff --git a/crazy_functions/pdf_fns/breakdown_txt.py b/crazy_functions/pdf_fns/breakdown_txt.py
index 784d796b..d89ce9fc 100644
--- a/crazy_functions/pdf_fns/breakdown_txt.py
+++ b/crazy_functions/pdf_fns/breakdown_txt.py
@@ -1,4 +1,5 @@
 from crazy_functions.ipc_fns.mp import run_in_subprocess_with_timeout
+from loguru import logger

 def force_breakdown(txt, limit, get_token_fn):
     """ 当无法用标点、空行分割时,我们用最暴力的方法切割
@@ -76,7 +77,7 @@ def cut(limit, get_token_fn, txt_tocut, must_break_at_empty_line, break_anyway=F
             remain_txt_to_cut = post
             remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage)
             process = fin_len/total_len
-            print(f'正在文本切分 {int(process*100)}%')
+            logger.info(f'正在文本切分 {int(process*100)}%')
             if len(remain_txt_to_cut.strip()) == 0:
                 break
     return res
@@ -119,7 +120,7 @@ if __name__ == '__main__':
     for i in range(5):
         file_content += file_content

-    print(len(file_content))
+    logger.info(len(file_content))
     TOKEN_LIMIT_PER_FRAGMENT = 2500
     res = breakdown_text_to_satisfy_token_limit(file_content, TOKEN_LIMIT_PER_FRAGMENT)
diff --git a/crazy_functions/pdf_fns/parse_pdf_legacy.py b/crazy_functions/pdf_fns/parse_pdf_legacy.py
index 482cd055..56cdb844 100644
--- a/crazy_functions/pdf_fns/parse_pdf_legacy.py
+++ b/crazy_functions/pdf_fns/parse_pdf_legacy.py
@@ -5,6 +5,7 @@ from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_
 from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
 from crazy_functions.crazy_utils import read_and_clean_pdf_text
 from shared_utils.colorful import *
+from loguru import logger
 import os

 def 解析PDF_简单拆解(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
@@ -93,7 +94,7 @@ def 解析PDF_简单拆解(file_manifest, project_folder, llm_kwargs, plugin_kwa
             generated_html_files.append(ch.save_file(create_report_file_name))
         except:
             from toolbox import trimmed_format_exc
-            print('writing html result failed:', trimmed_format_exc())
+            logger.error('writing html result failed: ' + trimmed_format_exc())

     # 准备文件的下载
     for pdf_path in generated_conclusion_files:
diff --git a/crazy_functions/rag_fns/llama_index_worker.py b/crazy_functions/rag_fns/llama_index_worker.py
index 7a559927..f6f7f0ab 100644
--- a/crazy_functions/rag_fns/llama_index_worker.py
+++ b/crazy_functions/rag_fns/llama_index_worker.py
@@ -1,6 +1,7 @@
 import llama_index
 import os
 import atexit
+from loguru import logger
 from typing import List
 from llama_index.core import Document
 from llama_index.core.schema import TextNode
@@ -41,14 +42,14 @@ class SaveLoad():
         return True

     def save_to_checkpoint(self, checkpoint_dir=None):
-        print(f'saving vector store to: {checkpoint_dir}')
+        logger.info(f'saving vector store to: {checkpoint_dir}')
         if checkpoint_dir is None: checkpoint_dir = self.checkpoint_dir
         self.vs_index.storage_context.persist(persist_dir=checkpoint_dir)

     def load_from_checkpoint(self, checkpoint_dir=None):
         if checkpoint_dir is None: checkpoint_dir = self.checkpoint_dir
         if self.does_checkpoint_exist(checkpoint_dir=checkpoint_dir):
-            print('loading checkpoint from disk')
+            logger.info('loading checkpoint from disk')
             from llama_index.core import StorageContext, load_index_from_storage
             storage_context = StorageContext.from_defaults(persist_dir=checkpoint_dir)
             self.vs_index = load_index_from_storage(storage_context, embed_model=self.embed_model)
@@ -85,9 +86,9 @@ class LlamaIndexRagWorker(SaveLoad):
         self.vs_index.storage_context.index_store.to_dict()
         docstore = self.vs_index.storage_context.docstore.docs
         vector_store_preview = "\n".join([ f"{_id} | {tn.text}" for _id, tn in docstore.items() ])
-        print('\n++ --------inspect_vector_store begin--------')
-        print(vector_store_preview)
-        print('oo --------inspect_vector_store end--------')
+        logger.info('\n++ --------inspect_vector_store begin--------')
+        logger.info(vector_store_preview)
+        logger.info('oo --------inspect_vector_store end--------')
         return vector_store_preview

     def add_documents_to_vector_store(self, document_list):
@@ -125,5 +126,5 @@ class LlamaIndexRagWorker(SaveLoad):

     def generate_node_array_preview(self, nodes):
         buf = "\n".join(([f"(No.{i+1} | score {n.score:.3f}): {n.text}" for i, n in enumerate(nodes)]))
-        if self.debug_mode: print(buf)
+        if self.debug_mode: logger.info(buf)
         return buf
diff --git a/crazy_functions/rag_fns/milvus_worker.py b/crazy_functions/rag_fns/milvus_worker.py
index 4b5b0ad9..6eccb6a7 100644
--- a/crazy_functions/rag_fns/milvus_worker.py
+++ b/crazy_functions/rag_fns/milvus_worker.py
@@ -2,6 +2,7 @@ import llama_index
 import os
 import atexit
 from typing import List
+from loguru import logger
 from llama_index.core import Document
 from llama_index.core.schema import TextNode
 from request_llms.embed_models.openai_embed import OpenAiEmbeddingModel
@@ -44,14 +45,14 @@ class MilvusSaveLoad():
         return True

     def save_to_checkpoint(self, checkpoint_dir=None):
-        print(f'saving vector store to: {checkpoint_dir}')
+        logger.info(f'saving vector store to: {checkpoint_dir}')
         # if checkpoint_dir is None: checkpoint_dir = self.checkpoint_dir
         # self.vs_index.storage_context.persist(persist_dir=checkpoint_dir)

     def load_from_checkpoint(self, checkpoint_dir=None):
         if checkpoint_dir is None: checkpoint_dir = self.checkpoint_dir
         if self.does_checkpoint_exist(checkpoint_dir=checkpoint_dir):
-            print('loading checkpoint from disk')
+            logger.info('loading checkpoint from disk')
             from llama_index.core import StorageContext, load_index_from_storage
             storage_context = StorageContext.from_defaults(persist_dir=checkpoint_dir)
             try:
@@ -101,7 +102,7 @@ class MilvusRagWorker(MilvusSaveLoad, LlamaIndexRagWorker):
         vector_store_preview = "\n".join(
             [f"{node.id_} | {node.text}" for node in dummy_retrieve_res]
         )
-        print('\n++ --------inspect_vector_store begin--------')
-        print(vector_store_preview)
-        print('oo --------inspect_vector_store end--------')
+        logger.info('\n++ --------inspect_vector_store begin--------')
+        logger.info(vector_store_preview)
+        logger.info('oo --------inspect_vector_store end--------')
         return vector_store_preview
diff --git a/crazy_functions/vector_fns/vector_database.py b/crazy_functions/vector_fns/vector_database.py
index 46fc72d6..cd93ae92 100644
--- a/crazy_functions/vector_fns/vector_database.py
+++ b/crazy_functions/vector_fns/vector_database.py
@@ -1,16 +1,17 @@
 # From project chatglm-langchain

-import threading
-from toolbox import Singleton
 import os
-import shutil
 import os
 import uuid
 import tqdm
+import shutil
+import threading
+import numpy as np
+from toolbox import Singleton
+from loguru import logger
 from langchain.vectorstores import FAISS
 from langchain.docstore.document import Document
 from typing import List, Tuple
-import numpy as np
 from crazy_functions.vector_fns.general_file_loader import load_file

 embedding_model_dict = {
@@ -150,17 +151,17 @@ class LocalDocQA:
         failed_files = []
         if isinstance(filepath, str):
             if not os.path.exists(filepath):
-                print("路径不存在")
+                logger.error("路径不存在")
                 return None
             elif os.path.isfile(filepath):
                 file = os.path.split(filepath)[-1]
                 try:
                     docs = load_file(filepath, SENTENCE_SIZE)
-                    print(f"{file} 已成功加载")
+                    logger.info(f"{file} 已成功加载")
                     loaded_files.append(filepath)
                 except Exception as e:
-                    print(e)
-                    print(f"{file} 未能成功加载")
+                    logger.error(e)
+                    logger.error(f"{file} 未能成功加载")
                     return None
             elif os.path.isdir(filepath):
                 docs = []
@@ -170,23 +171,23 @@ class LocalDocQA:
                         docs += load_file(fullfilepath, SENTENCE_SIZE)
                         loaded_files.append(fullfilepath)
                     except Exception as e:
-                        print(e)
+                        logger.error(e)
                         failed_files.append(file)

                 if len(failed_files) > 0:
-                    print("以下文件未能成功加载:")
+                    logger.error("以下文件未能成功加载:")
                     for file in failed_files:
-                        print(f"{file}\n")
+                        logger.error(f"{file}\n")

         else:
             docs = []
             for file in filepath:
                 docs += load_file(file, SENTENCE_SIZE)
-                print(f"{file} 已成功加载")
+                logger.info(f"{file} 已成功加载")
                 loaded_files.append(file)

         if len(docs) > 0:
-            print("文件加载完毕,正在生成向量库")
+            logger.info("文件加载完毕,正在生成向量库")
             if vs_path and os.path.isdir(vs_path):
                 try:
                     self.vector_store = FAISS.load_local(vs_path, text2vec)
@@ -233,7 +234,7 @@ class LocalDocQA:
         prompt += "\n\n".join([f"({k}): " + doc.page_content for k, doc in enumerate(related_docs_with_score)])
         prompt += "\n\n---\n\n"
         prompt = prompt.encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
-        # print(prompt)
+        # logger.info(prompt)
         response = {"query": query, "source_documents": related_docs_with_score}
         return response, prompt

@@ -262,7 +263,7 @@ def construct_vector_store(vs_id, vs_path, files, sentence_size, history, one_co
     else:
         pass
         # file_status = "文件未成功加载,请重新上传文件"
-        # print(file_status)
+        # logger.info(file_status)
     return local_doc_qa, vs_path

 @Singleton
@@ -278,7 +279,7 @@ class knowledge_archive_interface():
         if self.text2vec_large_chinese is None:
             # < -------------------预热文本向量化模组--------------- >
             from toolbox import ProxyNetworkActivate
-            print('Checking Text2vec ...')
+            logger.info('Checking Text2vec ...')
             from langchain.embeddings.huggingface import HuggingFaceEmbeddings
             with ProxyNetworkActivate('Download_LLM'):  # 临时地激活代理网络
                 self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
diff --git a/crazy_functions/下载arxiv论文翻译摘要.py b/crazy_functions/下载arxiv论文翻译摘要.py
index 4360df7f..25e4d01d 100644
--- a/crazy_functions/下载arxiv论文翻译摘要.py
+++ b/crazy_functions/下载arxiv论文翻译摘要.py
@@ -1,17 +1,19 @@
+import re, requests, unicodedata, os
 from toolbox import update_ui, get_log_folder
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from toolbox import CatchException, report_exception, get_conf
-import re, requests, unicodedata, os
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from loguru import logger
+

 def download_arxiv_(url_pdf):
     if 'arxiv.org' not in url_pdf:
         if ('.' in url_pdf) and ('/' not in url_pdf):
             new_url = 'https://arxiv.org/abs/'+url_pdf
-            print('下载编号:', url_pdf, '自动定位:', new_url)
+            logger.info(f'下载编号:{url_pdf},自动定位:{new_url}')
             # download_arxiv_(new_url)
             return download_arxiv_(new_url)
         else:
-            print('不能识别的URL!')
+            logger.info('不能识别的URL!')
             return None
     if 'abs' in url_pdf:
         url_pdf = url_pdf.replace('abs', 'pdf')
@@ -42,15 +44,12 @@ def download_arxiv_(url_pdf):
         requests_pdf_url = url_pdf
     file_path = download_dir+title_str

-    print('下载中')
+    logger.info('下载中')
     proxies = get_conf('proxies')
     r = requests.get(requests_pdf_url, proxies=proxies)
     with open(file_path, 'wb+') as f:
         f.write(r.content)
-    print('下载完成')
-
-    # print('输出下载命令:','aria2c -o \"%s\" %s'%(title_str,url_pdf))
-    # subprocess.call('aria2c --all-proxy=\"172.18.116.150:11084\" -o \"%s\" %s'%(download_dir+title_str,url_pdf), shell=True)
+    logger.info('下载完成')

     x = "%s %s %s.bib" % (paper_id, other_info['year'], other_info['authors'])
     x = x.replace('?', '?')\
@@ -63,19 +62,9 @@ def download_arxiv_(url_pdf):


 def get_name(_url_):
-    import os
     from bs4 import BeautifulSoup
-    print('正在获取文献名!')
-    print(_url_)
-
-    # arxiv_recall = {}
-    # if os.path.exists('./arxiv_recall.pkl'):
-    #     with open('./arxiv_recall.pkl', 'rb') as f:
-    #         arxiv_recall = pickle.load(f)
-
-    # if _url_ in arxiv_recall:
-    #     print('在缓存中')
-    #     return arxiv_recall[_url_]
+    logger.info('正在获取文献名!')
+    logger.info(_url_)

     proxies = get_conf('proxies')
     res = requests.get(_url_, proxies=proxies)
@@ -92,7 +81,7 @@ def get_name(_url_):
             other_details['abstract'] = abstract
     except:
         other_details['year'] = ''
-        print('年份获取失败')
+        logger.info('年份获取失败')

     # get author
     try:
@@ -101,7 +90,7 @@ def get_name(_url_):
         other_details['authors'] = authors
     except:
         other_details['authors'] = ''
-        print('authors获取失败')
+        logger.info('authors获取失败')

     # get comment
     try:
@@ -116,11 +105,11 @@ def get_name(_url_):
             other_details['comment'] = ''
     except:
         other_details['comment'] = ''
-        print('年份获取失败')
+        logger.info('comment获取失败')

     title_str = BeautifulSoup(
         res.text, 'html.parser').find('title').contents[0]
-    print('获取成功:', title_str)
+    logger.info(f'获取成功:{title_str}')
     # arxiv_recall[_url_] = (title_str+'.pdf', other_details)
     # with open('./arxiv_recall.pkl', 'wb') as f:
     #     pickle.dump(arxiv_recall, f)
diff --git a/crazy_functions/交互功能函数模板.py b/crazy_functions/交互功能函数模板.py
index 4a8ae6f6..ec9836a4 100644
--- a/crazy_functions/交互功能函数模板.py
+++ b/crazy_functions/交互功能函数模板.py
@@ -1,6 +1,5 @@
 from toolbox import CatchException, update_ui
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

 @CatchException
 def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
diff --git a/crazy_functions/函数动态生成.py b/crazy_functions/函数动态生成.py
index 2ca23559..cd1c8cd4 100644
--- a/crazy_functions/函数动态生成.py
+++ b/crazy_functions/函数动态生成.py
@@ -16,8 +16,8 @@ Testing:

 from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
 from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
-from .crazy_utils import input_clipping, try_install_deps
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
+from crazy_functions.crazy_utils import input_clipping, try_install_deps
 from crazy_functions.gen_fns.gen_fns_shared import is_function_successfully_generated
 from crazy_functions.gen_fns.gen_fns_shared import get_class_name
 from crazy_functions.gen_fns.gen_fns_shared import subprocess_worker
diff --git a/crazy_functions/命令行助手.py b/crazy_functions/命令行助手.py
index 43c6d8fb..390ce177 100644
--- a/crazy_functions/命令行助手.py
+++ b/crazy_functions/命令行助手.py
@@ -1,6 +1,6 @@
 from toolbox import CatchException, update_ui, gen_time_str
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from .crazy_utils import input_clipping
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import input_clipping
 import copy, json

 @CatchException
diff --git a/crazy_functions/多智能体.py b/crazy_functions/多智能体.py
index 00e4539c..37cfeab5 100644
--- a/crazy_functions/多智能体.py
+++ b/crazy_functions/多智能体.py
@@ -6,13 +6,14 @@

 """

+import time
 from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
 from toolbox import get_conf, select_api_key, update_ui_lastest_msg, Singleton
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
 from crazy_functions.crazy_utils import input_clipping, try_install_deps
 from crazy_functions.agent_fns.persistent import GradioMultiuserManagerForPersistentClasses
 from crazy_functions.agent_fns.auto_agent import AutoGenMath
-import time
+from loguru import logger

 def remove_model_prefix(llm):
     if llm.startswith('api2d-'): llm = llm.replace('api2d-', '')
@@ -80,12 +81,12 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     persistent_key = f"{user_uuid}->多智能体终端"
     if persistent_class_multi_user_manager.already_alive(persistent_key):
         # 当已经存在一个正在运行的多智能体终端时,直接将用户输入传递给它,而不是再次启动一个新的多智能体终端
-        print('[debug] feed new user input')
+        logger.info('[debug] feed new user input')
         executor = persistent_class_multi_user_manager.get(persistent_key)
         exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="resume")
     else:
         # 运行多智能体终端 (首次)
-        print('[debug] create new executor instance')
+        logger.info('[debug] create new executor instance')
         history = []
         chatbot.append(["正在启动: 多智能体终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
diff --git a/crazy_functions/总结word文档.py b/crazy_functions/总结word文档.py
index c27c952f..99f0919b 100644
--- a/crazy_functions/总结word文档.py
+++ b/crazy_functions/总结word文档.py
@@ -1,7 +1,7 @@
 from toolbox import update_ui
 from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False
diff --git a/crazy_functions/总结音视频.py b/crazy_functions/总结音视频.py
index b27bcce0..6adaca1a 100644
--- a/crazy_functions/总结音视频.py
+++ b/crazy_functions/总结音视频.py
@@ -1,5 +1,5 @@
 from toolbox import CatchException, report_exception, select_api_key, update_ui, get_conf
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder

 def split_audio_file(filename, split_duration=1000):
diff --git a/crazy_functions/批量总结PDF文档.py b/crazy_functions/批量总结PDF文档.py
index 4bd772fe..a23750df 100644
--- a/crazy_functions/批量总结PDF文档.py
+++ b/crazy_functions/批量总结PDF文档.py
@@ -1,16 +1,18 @@
+from loguru import logger
+
 from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str
 from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from .crazy_utils import read_and_clean_pdf_text
-from .crazy_utils import input_clipping
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import read_and_clean_pdf_text
+from crazy_functions.crazy_utils import input_clipping


 def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
     file_write_buffer = []
     for file_name in file_manifest:
-        print('begin analysis on:', file_name)
+        logger.info(f'begin analysis on: {file_name}')
         ##############################  <第 0 步,切割PDF>  ##################################
         # 递归地切割PDF文件,每一块(尽量是完整的一个section,比如introduction,experiment等,必要时再进行切割)
         # 的长度必须小于 2500 个 Token
@@ -38,7 +40,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
         last_iteration_result = paper_meta  # 初始值是摘要
         MAX_WORD_TOTAL = 4096 * 0.7
         n_fragment = len(paper_fragments)
-        if n_fragment >= 20: print('文章极长,不能达到预期效果')
+        if n_fragment >= 20: logger.warning('文章极长,不能达到预期效果')
         for i in range(n_fragment):
             NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
             i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} Chinese characters: {paper_fragments[i]}"
diff --git a/crazy_functions/批量总结PDF文档pdfminer.py b/crazy_functions/批量总结PDF文档pdfminer.py
index b5abc71d..0afc9968 100644
--- a/crazy_functions/批量总结PDF文档pdfminer.py
+++ b/crazy_functions/批量总结PDF文档pdfminer.py
@@ -1,6 +1,7 @@
+from loguru import logger
 from toolbox import update_ui
 from toolbox import CatchException, report_exception
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from toolbox import write_history_to_file, promote_file_to_downloadzone

 fast_debug = False
@@ -57,7 +58,6 @@ def readPdf(pdfPath):
         layout = device.get_result()
         for obj in layout._objs:
             if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
-                # print(obj.get_text())
                 outTextList.append(obj.get_text())

     return outTextList
@@ -66,7 +66,7 @@ def readPdf(pdfPath):
 def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
     import time, glob, os
     from bs4 import BeautifulSoup
-    print('begin analysis on:', file_manifest)
+    logger.info(f'begin analysis on: {file_manifest}')
     for index, fp in enumerate(file_manifest):
         if ".tex" in fp:
             with open(fp, 'r', encoding='utf-8', errors='replace') as f:
diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py
index a124150f..130dde8f 100644
--- a/crazy_functions/批量翻译PDF文档_NOUGAT.py
+++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py
@@ -1,9 +1,9 @@
 from toolbox import CatchException, report_exception, get_log_folder, gen_time_str
 from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
 from toolbox import write_history_to_file, promote_file_to_downloadzone
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-from .crazy_utils import read_and_clean_pdf_text
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
+from crazy_functions.crazy_utils import read_and_clean_pdf_text
 from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf
 from shared_utils.colorful import *
 import copy
@@ -60,7 +60,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst

     # 清空历史,以免输入溢出
     history = []
-    from .crazy_utils import get_files_from_everything
+    from crazy_functions.crazy_utils import get_files_from_everything
     success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf')
     if len(file_manifest) > 0:
         # 尝试导入依赖,如果缺少依赖,则给出安装建议
diff --git a/crazy_functions/数学动画生成manim.py b/crazy_functions/数学动画生成manim.py
index 551a8081..4a074e43 100644
--- a/crazy_functions/数学动画生成manim.py
+++ b/crazy_functions/数学动画生成manim.py
@@ -1,4 +1,5 @@
 import os
+from loguru import logger
 from toolbox import CatchException, update_ui, gen_time_str, promote_file_to_downloadzone
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from crazy_functions.crazy_utils import input_clipping
@@ -34,10 +35,10 @@ def eval_manim(code):
         return f'gpt_log/{time_str}.mp4'
     except subprocess.CalledProcessError as e:
         output = e.output.decode()
-        print(f"Command returned non-zero exit status {e.returncode}: {output}.")
+        logger.error(f"Command returned non-zero exit status {e.returncode}: {output}.")
         return f"Evaluating python script failed: {e.output}."
     except:
-        print('generating mp4 failed')
+        logger.error('generating mp4 failed')
         return "Generating mp4 failed."
diff --git a/crazy_functions/理解PDF文档内容.py b/crazy_functions/理解PDF文档内容.py
index fd935ab7..23e3ce4f 100644
--- a/crazy_functions/理解PDF文档内容.py
+++ b/crazy_functions/理解PDF文档内容.py
@@ -1,13 +1,12 @@
+from loguru import logger
 from toolbox import update_ui
 from toolbox import CatchException, report_exception
-from .crazy_utils import read_and_clean_pdf_text
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-fast_debug = False
+from crazy_functions.crazy_utils import read_and_clean_pdf_text
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive


 def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
-    import tiktoken
-    print('begin analysis on:', file_name)
+    logger.info(f'begin analysis on: {file_name}')

     ##############################  <第 0 步,切割PDF>  ##################################
     # 递归地切割PDF文件,每一块(尽量是完整的一个section,比如introduction,experiment等,必要时再进行切割)
@@ -36,7 +35,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     last_iteration_result = paper_meta  # 初始值是摘要
     MAX_WORD_TOTAL = 4096
     n_fragment = len(paper_fragments)
-    if n_fragment >= 20: print('文章极长,不能达到预期效果')
+    if n_fragment >= 20: logger.warning('文章极长,不能达到预期效果')
     for i in range(n_fragment):
         NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
         i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}"
@@ -57,7 +56,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     chatbot.append([i_say_show_user, gpt_say])

     ##############################  <第 4 步,设置一个token上限,防止回答时Token溢出>  ##################################
-    from .crazy_utils import input_clipping
+    from crazy_functions.crazy_utils import input_clipping
     _, final_results = input_clipping("", final_results, max_token_limit=3200)
     yield from update_ui(chatbot=chatbot, history=final_results)  # 注意这里的历史记录被替代了
diff --git a/crazy_functions/生成函数注释.py b/crazy_functions/生成函数注释.py
index b563ba66..64a3176c 100644
--- a/crazy_functions/生成函数注释.py
+++ b/crazy_functions/生成函数注释.py
@@ -1,12 +1,12 @@
+from loguru import logger
 from toolbox import update_ui
 from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-fast_debug = False
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

 def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
     import time, os
-    print('begin analysis on:', file_manifest)
+    logger.info(f'begin analysis on: {file_manifest}')
     for index, fp in enumerate(file_manifest):
         with open(fp, 'r', encoding='utf-8', errors='replace') as f:
             file_content = f.read()
@@ -16,22 +16,20 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
         chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

-        if not fast_debug:
-            msg = '正常'
-            # ** gpt request **
-            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-                i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)   # 带超时倒计时
+        msg = '正常'
+        # ** gpt request **
+        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+            i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)   # 带超时倒计时

-            chatbot[-1] = (i_say_show_user, gpt_say)
-            history.append(i_say_show_user); history.append(gpt_say)
-            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-            if not fast_debug: time.sleep(2)
-
-        if not fast_debug:
-            res = write_history_to_file(history)
-            promote_file_to_downloadzone(res, chatbot=chatbot)
-            chatbot.append(("完成了吗?", res))
+        chatbot[-1] = (i_say_show_user, gpt_say)
+        history.append(i_say_show_user); history.append(gpt_say)
         yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
+        time.sleep(2)
+
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
+    chatbot.append(("完成了吗?", res))
+    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
diff --git a/crazy_functions/生成多种Mermaid图表.py b/crazy_functions/生成多种Mermaid图表.py
index 677ddf8f..fff7f5d8 100644
--- a/crazy_functions/生成多种Mermaid图表.py
+++ b/crazy_functions/生成多种Mermaid图表.py
@@ -1,5 +1,5 @@
 from toolbox import CatchException, update_ui, report_exception
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from crazy_functions.plugin_template.plugin_class_template import (
     GptAcademicPluginTemplate,
 )
@@ -201,8 +201,7 @@ def 解析历史输入(history, llm_kwargs, file_manifest, chatbot, plugin_kwarg
     MAX_WORD_TOTAL = 4096
     n_txt = len(txt)
     last_iteration_result = "从以下文本中提取摘要。"
-    if n_txt >= 20:
-        print("文章极长,不能达到预期效果")
+
     for i in range(n_txt):
         NUM_OF_WORD = MAX_WORD_TOTAL // n_txt
         i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words in Chinese: {txt[i]}"
diff --git a/crazy_functions/知识库问答.py b/crazy_functions/知识库问答.py
index 943eeeff..f902ed09 100644
--- a/crazy_functions/知识库问答.py
+++ b/crazy_functions/知识库问答.py
@@ -1,6 +1,6 @@
 from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg, get_log_folder, get_user
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything
-
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything
+from loguru import logger
 install_msg ="""

 1. python -m pip install torch --index-url https://download.pytorch.org/whl/cpu
@@ -40,7 +40,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     except Exception as e:
         chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        # from .crazy_utils import try_install_deps
+        # from crazy_functions.crazy_utils import try_install_deps
         # try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
         # yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
         return
@@ -60,7 +60,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     # < -------------------预热文本向量化模组--------------- >
     chatbot.append(['<br/>'.join(file_manifest), "正在预热文本向量化模组, 如果是第一次运行, 将消耗较长时间下载中文向量化模型..."])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-    print('Checking Text2vec ...')
+    logger.info('Checking Text2vec ...')
     from langchain.embeddings.huggingface import HuggingFaceEmbeddings
     with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
         HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
@@ -68,7 +68,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     # < -------------------构建知识库--------------- >
     chatbot.append(['<br/>'.join(file_manifest), "正在构建知识库..."])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-    print('Establishing knowledge archive ...')
+    logger.info('Establishing knowledge archive ...')
     with ProxyNetworkActivate('Download_LLM'):    # 临时地激活代理网络
         kai = knowledge_archive_interface()
         vs_path = get_log_folder(user=get_user(chatbot), plugin_name='vec_store')
@@ -93,7 +93,7 @@ def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     except Exception as e:
         chatbot.append(["依赖不足", f"{str(e)}\n\n导入依赖失败。请用以下命令安装" + install_msg])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        # from .crazy_utils import try_install_deps
+        # from crazy_functions.crazy_utils import try_install_deps
         # try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
         # yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
         return
diff --git a/crazy_functions/联网的ChatGPT.py b/crazy_functions/联网的ChatGPT.py
index c121e54c..20402eed 100644
--- a/crazy_functions/联网的ChatGPT.py
+++ b/crazy_functions/联网的ChatGPT.py
@@ -1,5 +1,5 @@
 from toolbox import CatchException, update_ui
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
 import requests
 from bs4 import BeautifulSoup
 from request_llms.bridge_all import model_info
@@ -23,8 +23,8 @@ def google(query, proxies):
         item = {'title': title, 'link': link}
         results.append(item)

-    for r in results:
-        print(r['link'])
+    # for r in results:
+    #     print(r['link'])
     return results

 def scrape_text(url, proxies) -> str:
diff --git a/crazy_functions/联网的ChatGPT_bing版.py b/crazy_functions/联网的ChatGPT_bing版.py
index eff6f8f9..d748ceab 100644
--- a/crazy_functions/联网的ChatGPT_bing版.py
+++ b/crazy_functions/联网的ChatGPT_bing版.py
@@ -1,5 +1,5 @@
 from toolbox import CatchException, update_ui
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
 import requests
 from bs4 import BeautifulSoup
 from request_llms.bridge_all import model_info
@@ -22,8 +22,8 @@ def bing_search(query, proxies=None):
         item = {'title': title, 'link': link}
         results.append(item)

-    for r in results:
-        print(r['link'])
+    # for r in results:
+    #     print(r['link'])
     return results
diff --git a/crazy_functions/解析JupyterNotebook.py b/crazy_functions/解析JupyterNotebook.py
index 2f2c0883..e7186aa9 100644
--- a/crazy_functions/解析JupyterNotebook.py
+++ b/crazy_functions/解析JupyterNotebook.py
@@ -64,7 +64,7 @@ def parseNotebook(filename, enable_markdown=1):


 def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
-    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
+    from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
     if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""):
         plugin_kwargs.pop("advanced_arg")
     enable_markdown = plugin_kwargs.get("advanced_arg", "1")
diff --git a/crazy_functions/询问多个大语言模型.py b/crazy_functions/询问多个大语言模型.py
index a608b7b0..82619cd9 100644
--- a/crazy_functions/询问多个大语言模型.py
+++ b/crazy_functions/询问多个大语言模型.py
@@ -1,5 +1,5 @@
 from toolbox import CatchException, update_ui, get_conf
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
request_gpt_model_in_new_thread_with_ui_alive
 import datetime

 @CatchException
 def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
diff --git a/crazy_functions/语音助手.py b/crazy_functions/语音助手.py
index 1e85b361..8aeeeda4 100644
--- a/crazy_functions/语音助手.py
+++ b/crazy_functions/语音助手.py
@@ -1,11 +1,13 @@
 from toolbox import update_ui
 from toolbox import CatchException, get_conf, markdown_convertion
+from request_llms.bridge_all import predict_no_ui_long_connection
 from crazy_functions.crazy_utils import input_clipping
 from crazy_functions.agent_fns.watchdog import WatchDog
-from request_llms.bridge_all import predict_no_ui_long_connection
+from crazy_functions.live_audio.aliyunASR import AliyunASR
+from loguru import logger
+
 import threading, time
 import numpy as np
-from .live_audio.aliyunASR import AliyunASR
 import json
 import re
@@ -42,9 +44,9 @@ class AsyncGptTask():
             gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt,
                                                             observe_window=observe_window[index], console_slience=True)
         except ConnectionAbortedError as token_exceed_err:
-            print('至少一个线程任务Token溢出而失败', e)
+            logger.error(f'至少一个线程任务Token溢出而失败 {token_exceed_err}')
         except Exception as e:
-            print('至少一个线程任务意外失败', e)
+            logger.error(f'至少一个线程任务意外失败 {e}')

     def add_async_gpt_task(self, i_say, chatbot_index, llm_kwargs, history, system_prompt):
         self.observe_future.append([""])
diff --git a/crazy_functions/读文章写摘要.py b/crazy_functions/读文章写摘要.py
index 95adda09..1bb0d325 100644
--- a/crazy_functions/读文章写摘要.py
+++ b/crazy_functions/读文章写摘要.py
@@ -1,12 +1,11 @@
 from toolbox import update_ui
 from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

 def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
     import time, glob, os
-    print('begin analysis on:', file_manifest)
     for index, fp in enumerate(file_manifest):
         with open(fp, 'r', encoding='utf-8', errors='replace') as f:
             file_content = f.read()
diff --git a/crazy_functions/谷歌检索小助手.py b/crazy_functions/谷歌检索小助手.py
index 27873518..0cb0bbb0 100644
--- a/crazy_functions/谷歌检索小助手.py
+++ b/crazy_functions/谷歌检索小助手.py
@@ -1,4 +1,4 @@
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from toolbox import CatchException, report_exception, promote_file_to_downloadzone
 from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file
 import logging
diff --git a/main.py b/main.py
index 294f64af..021681c4 100644
--- a/main.py
+++ b/main.py
@@ -13,16 +13,10 @@
 help_menu_description = \

如何语音对话: 请阅读Wiki

如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)""" +from loguru import logger def enable_log(PATH_LOGGING): - import logging - admin_log_path = os.path.join(PATH_LOGGING, "admin") - os.makedirs(admin_log_path, exist_ok=True) - log_dir = os.path.join(admin_log_path, "chat_secrets.log") - try:logging.basicConfig(filename=log_dir, level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") - except:logging.basicConfig(filename=log_dir, level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") - # Disable logging output from the 'httpx' logger - logging.getLogger("httpx").setLevel(logging.WARNING) - print(f"所有对话记录将自动保存在本地目录{log_dir}, 请注意自我隐私保护哦!") + from shared_utils.logging import setup_logging + setup_logging(PATH_LOGGING) def encode_plugin_info(k, plugin)->str: import copy @@ -42,9 +36,16 @@ def main(): import gradio as gr if gr.__version__ not in ['3.32.9', '3.32.10', '3.32.11']: raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.") - from request_llms.bridge_all import predict + + # 一些基础工具 from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith + # 对话、日志记录 + enable_log(get_conf("PATH_LOGGING")) + + # 对话句柄 + from request_llms.bridge_all import predict + # 读取配置 proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION') CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT') @@ -61,8 +62,6 @@ def main(): from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid title_html = f"

<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>

{theme_declaration}" - # 对话、日志记录 - enable_log(PATH_LOGGING) # 一些普通功能模块 from core_functional import get_core_functions @@ -339,9 +338,9 @@ def main(): # Gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 def run_delayed_tasks(): import threading, webbrowser, time - print(f"如果浏览器没有自动打开,请复制并转到以下URL:") - if DARK_MODE: print(f"\t「暗色主题已启用(支持动态切换主题)」: http://localhost:{PORT}") - else: print(f"\t「亮色主题已启用(支持动态切换主题)」: http://localhost:{PORT}") + logger.info(f"如果浏览器没有自动打开,请复制并转到以下URL:") + if DARK_MODE: logger.info(f"\t「暗色主题已启用(支持动态切换主题)」: http://localhost:{PORT}") + else: logger.info(f"\t「亮色主题已启用(支持动态切换主题)」: http://localhost:{PORT}") def auto_updates(): time.sleep(0); auto_update() def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}") diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index ead403c0..674b4a89 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -9,6 +9,7 @@ 2. predict_no_ui_long_connection(...) """ import tiktoken, copy, re +from loguru import logger from functools import lru_cache from concurrent.futures import ThreadPoolExecutor from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask, read_one_api_model_name @@ -51,9 +52,9 @@ class LazyloadTiktoken(object): @staticmethod @lru_cache(maxsize=128) def get_encoder(model): - print('正在加载tokenizer,如果是第一次运行,可能需要一点时间下载参数') + logger.info('正在加载tokenizer,如果是第一次运行,可能需要一点时间下载参数') tmp = tiktoken.encoding_for_model(model) - print('加载tokenizer完毕') + logger.info('加载tokenizer完毕') return tmp def encode(self, *args, **kwargs): @@ -83,7 +84,7 @@ try: API_URL = get_conf("API_URL") if API_URL != "https://api.openai.com/v1/chat/completions": openai_endpoint = API_URL - print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置") + logger.warning("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置") except: pass # 新版配置 @@ -255,8 +256,6 @@ model_info = { "max_token": 128000, "tokenizer": tokenizer_gpt4, "token_cnt": get_token_num_gpt4, - "openai_disable_system_prompt": True, - "openai_disable_stream": True, }, "o1-mini": { "fn_with_ui": chatgpt_ui, @@ -265,8 +264,6 @@ model_info = { "max_token": 128000, "tokenizer": tokenizer_gpt4, "token_cnt": get_token_num_gpt4, - "openai_disable_system_prompt": True, - "openai_disable_stream": True, }, "gpt-4-turbo": { @@ -683,7 +680,7 @@ if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free try: from .bridge_chatglmft import predict_no_ui_long_connection as chatglmft_noui @@ -699,7 +696,7 @@ if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 上海AI-LAB书生大模型 -=-=-=-=-=-=- if "internlm" in AVAIL_LLM_MODELS: try: @@ -716,7 +713,7 @@ if "internlm" in AVAIL_LLM_MODELS: } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) if "chatglm_onnx" in AVAIL_LLM_MODELS: try: from .bridge_chatglmonnx import predict_no_ui_long_connection as chatglm_onnx_noui @@ -732,7 +729,7 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS: } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 通义-本地模型 -=-=-=-=-=-=- if "qwen-local" in AVAIL_LLM_MODELS: try: @@ -750,7 +747,7 @@ if "qwen-local" in AVAIL_LLM_MODELS: } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 通义-在线模型 -=-=-=-=-=-=- if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in 
AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai try: @@ -786,7 +783,7 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen- } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 零一万物模型 -=-=-=-=-=-=- yi_models = ["yi-34b-chat-0205","yi-34b-chat-200k","yi-large","yi-medium","yi-spark","yi-large-turbo","yi-large-preview"] if any(item in yi_models for item in AVAIL_LLM_MODELS): @@ -866,7 +863,7 @@ if any(item in yi_models for item in AVAIL_LLM_MODELS): }, }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 讯飞星火认知大模型 -=-=-=-=-=-=- if "spark" in AVAIL_LLM_MODELS: try: @@ -884,7 +881,7 @@ if "spark" in AVAIL_LLM_MODELS: } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型 try: from .bridge_spark import predict_no_ui_long_connection as spark_noui @@ -901,7 +898,7 @@ if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型 } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) if any(x in AVAIL_LLM_MODELS for x in ("sparkv3", "sparkv3.5", "sparkv4")): # 讯飞星火认知大模型 try: from .bridge_spark import predict_no_ui_long_connection as spark_noui @@ -936,7 +933,7 @@ if any(x in AVAIL_LLM_MODELS for x in ("sparkv3", "sparkv3.5", "sparkv4")): # } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) if "llama2" in AVAIL_LLM_MODELS: # llama2 try: from .bridge_llama2 import predict_no_ui_long_connection as llama2_noui @@ -952,7 +949,7 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2 } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 智谱 -=-=-=-=-=-=- if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容配置 try: @@ -967,7 +964,7 @@ if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容 }, }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 幻方-深度求索大模型 -=-=-=-=-=-=- if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder try: @@ -984,7 +981,7 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder } }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 幻方-深度求索大模型在线API -=-=-=-=-=-=- if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS: try: @@ -1012,7 +1009,7 @@ if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS: }, }) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- one-api 对齐支持 -=-=-=-=-=-=- for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]: # 为了更灵活地接入one-api多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"] @@ -1025,7 +1022,7 @@ for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]: # 如果是已知模型,则尝试获取其信息 original_model_info = model_info.get(origin_model_name.replace("one-api-", "", 1), None) except: - print(f"one-api模型 {model} 的 max_token 配置不是整数,请检查配置文件。") + logger.error(f"one-api模型 {model} 的 max_token 配置不是整数,请检查配置文件。") continue this_model_info = { "fn_with_ui": chatgpt_ui, @@ -1056,7 +1053,7 @@ for model in [m for m in AVAIL_LLM_MODELS if m.startswith("vllm-")]: try: _, max_token_tmp = read_one_api_model_name(model) except: - print(f"vllm模型 {model} 的 max_token 配置不是整数,请检查配置文件。") + logger.error(f"vllm模型 {model} 的 max_token 配置不是整数,请检查配置文件。") continue model_info.update({ model: { @@ -1083,7 +1080,7 @@ for model in [m for m in AVAIL_LLM_MODELS if m.startswith("ollama-")]: try: _, max_token_tmp = 
read_one_api_model_name(model) except: - print(f"ollama模型 {model} 的 max_token 配置不是整数,请检查配置文件。") + logger.error(f"ollama模型 {model} 的 max_token 配置不是整数,请检查配置文件。") continue model_info.update({ model: { diff --git a/request_llms/bridge_chatglmft.py b/request_llms/bridge_chatglmft.py index 394a3387..fe1489db 100644 --- a/request_llms/bridge_chatglmft.py +++ b/request_llms/bridge_chatglmft.py @@ -1,12 +1,13 @@ from transformers import AutoModel, AutoTokenizer +from loguru import logger +from toolbox import update_ui, get_conf +from multiprocessing import Process, Pipe import time import os import json import threading import importlib -from toolbox import update_ui, get_conf -from multiprocessing import Process, Pipe load_message = "ChatGLMFT尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLMFT消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" @@ -78,7 +79,7 @@ class GetGLMFTHandle(Process): config.pre_seq_len = model_args['pre_seq_len'] config.prefix_projection = model_args['prefix_projection'] - print(f"Loading prefix_encoder weight from {CHATGLM_PTUNING_CHECKPOINT}") + logger.info(f"Loading prefix_encoder weight from {CHATGLM_PTUNING_CHECKPOINT}") model = AutoModel.from_pretrained(model_args['model_name_or_path'], config=config, trust_remote_code=True) prefix_state_dict = torch.load(os.path.join(CHATGLM_PTUNING_CHECKPOINT, "pytorch_model.bin")) new_prefix_state_dict = {} @@ -88,7 +89,7 @@ class GetGLMFTHandle(Process): model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) if model_args['quantization_bit'] is not None and model_args['quantization_bit'] != 0: - print(f"Quantized to {model_args['quantization_bit']} bit") + logger.info(f"Quantized to {model_args['quantization_bit']} bit") model = model.quantize(model_args['quantization_bit']) model = model.cuda() if model_args['pre_seq_len'] is not None: diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py index 03694b1b..763897a1 100644 --- a/request_llms/bridge_chatgpt.py +++ b/request_llms/bridge_chatgpt.py @@ -12,11 +12,12 @@ import json import os import re import time -import logging import traceback import requests import random +from loguru import logger + # config_private.py放自己的秘密如API和代理网址 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history @@ -152,7 +153,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], retry += 1 traceback.print_exc() if retry > MAX_RETRY: raise TimeoutError - if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') + if MAX_RETRY!=0: logger.error(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') if not stream: # 该分支仅适用于不支持stream的o1模型,其他情形一律不适用 @@ -191,7 +192,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], if (not has_content) and (not has_role): continue # raise RuntimeError("发现不标准的第三方接口:"+delta) if has_content: # has_role = True/False result += delta["content"] - if not console_slience: print(delta["content"], end='') + if not console_slience: logger.info(delta["content"], end='') if observe_window is not None: # 观测窗,把已经获取的数据显示出去 if len(observe_window) >= 1: @@ -337,7 +338,6 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith # 前者是API2D的结束条件,后者是OPENAI的结束条件 if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0): # 判定为数据流的结束,gpt_replying_buffer也写完了 - # logging.info(f'[response] {gpt_replying_buffer}') log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, 
output_str=gpt_replying_buffer) break # 处理数据流的主体 @@ -364,7 +364,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith error_msg = chunk_decoded chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg) yield from update_ui(chatbot=chatbot, history=history, msg="Json解析异常" + error_msg) # 刷新界面 - print(error_msg) + logger.error(error_msg) return return # return from stream-branch @@ -524,7 +524,6 @@ def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:st "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0301", ]) - logging.info("Random select model:" + model) payload = { "model": model, @@ -534,10 +533,7 @@ def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:st "n": 1, "stream": stream, } - # try: - # print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........") - # except: - # print('输入中可能存在乱码。') + return headers,payload diff --git a/request_llms/bridge_chatgpt_vision.py b/request_llms/bridge_chatgpt_vision.py index 449a8cf5..dfa6e064 100644 --- a/request_llms/bridge_chatgpt_vision.py +++ b/request_llms/bridge_chatgpt_vision.py @@ -8,15 +8,15 @@ 2. predict_no_ui_long_connection:支持多线程 """ +import os import json import time -import logging import requests import base64 -import os import glob +from loguru import logger from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, \ - update_ui_lastest_msg, get_max_token, encode_image, have_any_recent_upload_image_files + update_ui_lastest_msg, get_max_token, encode_image, have_any_recent_upload_image_files, log_chat proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \ @@ -100,7 +100,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) raw_input = inputs - logging.info(f'[raw_input] {raw_input}') def make_media_input(inputs, image_paths): for image_path in image_paths: inputs = inputs + f'
<br/><img src="file={image_path}">
' @@ -185,7 +184,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp # 判定为数据流的结束,gpt_replying_buffer也写完了 lastmsg = chatbot[-1][-1] + f"\n\n\n\n「{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」" yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1) - logging.info(f'[response] {gpt_replying_buffer}') + log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer) break # 处理数据流的主体 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}" @@ -210,7 +209,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp error_msg = chunk_decoded chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key) yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面 - print(error_msg) + logger.error(error_msg) return def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key=""): @@ -301,10 +300,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths): "presence_penalty": 0, "frequency_penalty": 0, } - try: - print(f" {llm_kwargs['llm_model']} : {inputs[:100]} ..........") - except: - print('输入中可能存在乱码。') + return headers, payload, api_key diff --git a/request_llms/bridge_chatgpt_website.py b/request_llms/bridge_chatgpt_website.py deleted file mode 100644 index 94e1ebb3..00000000 --- a/request_llms/bridge_chatgpt_website.py +++ /dev/null @@ -1,281 +0,0 @@ -# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目 - -""" - 该文件中主要包含三个函数 - - 不具备多线程能力的函数: - 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 - - 具备多线程调用能力的函数 - 2. predict_no_ui_long_connection:支持多线程 -""" - -import json -import time -import gradio as gr -import logging -import traceback -import requests -import importlib - -# config_private.py放自己的秘密如API和代理网址 -# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 -from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc -proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG = \ - get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG') - -timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' 
+ \ - '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。' - -def get_full_error(chunk, stream_response): - """ - 获取完整的从Openai返回的报错 - """ - while True: - try: - chunk += next(stream_response) - except: - break - return chunk - - -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): - """ - 发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 - inputs: - 是本次问询的输入 - sys_prompt: - 系统静默prompt - llm_kwargs: - chatGPT的内部调优参数 - history: - 是之前的对话列表 - observe_window = None: - 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 - """ - watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可 - headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True) - retry = 0 - while True: - try: - # make a POST request to the API endpoint, stream=False - from .bridge_all import model_info - endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - response = requests.post(endpoint, headers=headers, proxies=proxies, - json=payload, stream=True, timeout=TIMEOUT_SECONDS); break - except requests.exceptions.ReadTimeout as e: - retry += 1 - traceback.print_exc() - if retry > MAX_RETRY: raise TimeoutError - if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') - - stream_response = response.iter_lines() - result = '' - while True: - try: chunk = next(stream_response).decode() - except StopIteration: - break - except requests.exceptions.ConnectionError: - chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。 - if len(chunk)==0: continue - if not chunk.startswith('data:'): - error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode() - if "reduce the length" in error_msg: - raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg) - else: - raise RuntimeError("OpenAI拒绝了请求:" + error_msg) - if ('data: [DONE]' in chunk): break # api2d 正常完成 - json_data = json.loads(chunk.lstrip('data:'))['choices'][0] - delta = json_data["delta"] - if len(delta) == 0: break - if "role" in delta: continue - if "content" in delta: - result += delta["content"] - if not console_slience: print(delta["content"], end='') - if observe_window is not None: - # 观测窗,把已经获取的数据显示出去 - if len(observe_window) >= 1: observe_window[0] += delta["content"] - # 看门狗,如果超过期限没有喂狗,则终止 - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("用户取消了程序。") - else: raise RuntimeError("意外Json结构:"+delta) - if json_data['finish_reason'] == 'content_filter': - raise RuntimeError("由于提问含不合规内容被Azure过滤。") - if json_data['finish_reason'] == 'length': - raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。") - return result - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 发送至chatGPT,流式获取输出。 - 用于基础的对话功能。 - inputs 是本次问询的输入 - top_p, temperature是chatGPT的内部调优参数 - history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误) - chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 - additional_fn代表点击的哪个按钮,按钮见functional.py - """ - if additional_fn is not None: - from core_functional import handle_core_functionality - inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) - - raw_input = inputs - logging.info(f'[raw_input] {raw_input}') - chatbot.append((inputs, "")) - yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 - - try: - headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream) - 
except RuntimeError as e: - chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。") - yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面 - return - - history.append(inputs); history.append("") - - retry = 0 - while True: - try: - # make a POST request to the API endpoint, stream=True - from .bridge_all import model_info - endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - response = requests.post(endpoint, headers=headers, proxies=proxies, - json=payload, stream=True, timeout=TIMEOUT_SECONDS);break - except: - retry += 1 - chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg)) - retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else "" - yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面 - if retry > MAX_RETRY: raise TimeoutError - - gpt_replying_buffer = "" - - is_head_of_the_stream = True - if stream: - stream_response = response.iter_lines() - while True: - try: - chunk = next(stream_response) - except StopIteration: - # 非OpenAI官方接口的出现这样的报错,OpenAI和API2D不会走这里 - chunk_decoded = chunk.decode() - error_msg = chunk_decoded - chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg) - yield from update_ui(chatbot=chatbot, history=history, msg="非Openai官方接口返回了错误:" + chunk.decode()) # 刷新界面 - return - - # print(chunk.decode()[6:]) - if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()): - # 数据流的第一帧不携带content - is_head_of_the_stream = False; continue - - if chunk: - try: - chunk_decoded = chunk.decode() - # 前者是API2D的结束条件,后者是OPENAI的结束条件 - if 'data: [DONE]' in chunk_decoded: - # 判定为数据流的结束,gpt_replying_buffer也写完了 - logging.info(f'[response] {gpt_replying_buffer}') - break - # 处理数据流的主体 - chunkjson = json.loads(chunk_decoded[6:]) - status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}" - delta = chunkjson['choices'][0]["delta"] - if "content" in delta: - gpt_replying_buffer = gpt_replying_buffer + delta["content"] - history[-1] = gpt_replying_buffer - chatbot[-1] = (history[-2], history[-1]) - yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面 - except Exception as e: - yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面 - chunk = get_full_error(chunk, stream_response) - chunk_decoded = chunk.decode() - error_msg = chunk_decoded - chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg) - yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面 - print(error_msg) - return - -def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg): - from .bridge_all import model_info - openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup' - if "reduce the length" in error_msg: - if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出 - history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'], - max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一 - chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)") - # history = [] # 清除历史 - elif "does not exist" in error_msg: - chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 
模型不存在, 或者您没有获得体验资格.") - elif "Incorrect API key" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website) - elif "exceeded your current quota" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website) - elif "account is not active" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website) - elif "associated with a deactivated account" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website) - elif "bad forward key" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.") - elif "Not enough point" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.") - else: - from toolbox import regular_txt_to_markdown - tb_str = '```\n' + trimmed_format_exc() + '```' - chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}") - return chatbot, history - -def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): - """ - 整合所有信息,选择LLM模型,生成http请求,为发送请求做准备 - """ - if not is_any_api_key(llm_kwargs['api_key']): - raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。") - - headers = { - "Content-Type": "application/json", - } - - conversation_cnt = len(history) // 2 - - messages = [{"role": "system", "content": system_prompt}] - if conversation_cnt: - for index in range(0, 2*conversation_cnt, 2): - what_i_have_asked = {} - what_i_have_asked["role"] = "user" - what_i_have_asked["content"] = history[index] - what_gpt_answer = {} - what_gpt_answer["role"] = "assistant" - what_gpt_answer["content"] = history[index+1] - if what_i_have_asked["content"] != "": - if what_gpt_answer["content"] == "": continue - if what_gpt_answer["content"] == timeout_bot_msg: continue - messages.append(what_i_have_asked) - messages.append(what_gpt_answer) - else: - messages[-1]['content'] = what_gpt_answer['content'] - - what_i_ask_now = {} - what_i_ask_now["role"] = "user" - what_i_ask_now["content"] = inputs - messages.append(what_i_ask_now) - - payload = { - "model": llm_kwargs['llm_model'].strip('api2d-'), - "messages": messages, - "temperature": llm_kwargs['temperature'], # 1.0, - "top_p": llm_kwargs['top_p'], # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - try: - print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........") - except: - print('输入中可能存在乱码。') - return headers,payload - - diff --git a/request_llms/bridge_claude.py b/request_llms/bridge_claude.py index 2c27fe30..a08fadc8 100644 --- a/request_llms/bridge_claude.py +++ b/request_llms/bridge_claude.py @@ -9,13 +9,14 @@ 具备多线程调用能力的函数 2. 
predict_no_ui_long_connection:支持多线程 """ -import logging import os import time import traceback import json import requests +from loguru import logger from toolbox import get_conf, update_ui, trimmed_format_exc, encode_image, every_image_file_in_path, log_chat + picture_system_prompt = "\n当回复图像时,必须说明正在回复哪张图像。所有图像仅在最后一个问题中提供,即使它们在历史记录中被提及。请使用'这是第X张图像:'的格式来指明您正在描述的是哪张图像。" Claude_3_Models = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229", "claude-3-5-sonnet-20240620"] @@ -101,7 +102,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", retry += 1 traceback.print_exc() if retry > MAX_RETRY: raise TimeoutError - if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') + if MAX_RETRY!=0: logger.error(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') stream_response = response.iter_lines() result = '' while True: @@ -116,12 +117,11 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", if need_to_pass: pass elif is_last_chunk: - # logging.info(f'[response] {result}') + # logger.info(f'[response] {result}') break else: if chunkjson and chunkjson['type'] == 'content_block_delta': result += chunkjson['delta']['text'] - print(chunkjson['delta']['text'], end='') if observe_window is not None: # 观测窗,把已经获取的数据显示出去 if len(observe_window) >= 1: @@ -134,7 +134,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", chunk = get_full_error(chunk, stream_response) chunk_decoded = chunk.decode() error_msg = chunk_decoded - print(error_msg) + logger.error(error_msg) raise RuntimeError("Json解析不合常规") return result @@ -200,7 +200,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp retry += 1 traceback.print_exc() if retry > MAX_RETRY: raise TimeoutError - if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') + if MAX_RETRY!=0: logger.error(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') stream_response = response.iter_lines() gpt_replying_buffer = "" @@ -217,7 +217,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp pass elif is_last_chunk: log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer) - # logging.info(f'[response] {gpt_replying_buffer}') + # logger.info(f'[response] {gpt_replying_buffer}') break else: if chunkjson and chunkjson['type'] == 'content_block_delta': @@ -230,7 +230,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp chunk = get_full_error(chunk, stream_response) chunk_decoded = chunk.decode() error_msg = chunk_decoded - print(error_msg) + logger.error(error_msg) raise RuntimeError("Json解析不合常规") def multiple_picture_types(image_paths): diff --git a/request_llms/bridge_cohere.py b/request_llms/bridge_cohere.py index 5ce5846c..64941145 100644 --- a/request_llms/bridge_cohere.py +++ b/request_llms/bridge_cohere.py @@ -13,11 +13,9 @@ import json import time import gradio as gr -import logging import traceback import requests -import importlib -import random +from loguru import logger # config_private.py放自己的秘密如API和代理网址 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 @@ -98,7 +96,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], retry += 1 traceback.print_exc() if retry > MAX_RETRY: raise TimeoutError - if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') + if MAX_RETRY!=0: logger.error(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') stream_response = response.iter_lines() result = '' @@ -113,7 +111,7 
@@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], if chunkjson['event_type'] == 'stream-start': continue if chunkjson['event_type'] == 'text-generation': result += chunkjson["text"] - if not console_slience: print(chunkjson["text"], end='') + if not console_slience: logger.info(chunkjson["text"], end='') if observe_window is not None: # 观测窗,把已经获取的数据显示出去 if len(observe_window) >= 1: @@ -153,7 +151,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) raw_input = inputs - # logging.info(f'[raw_input] {raw_input}') + # logger.info(f'[raw_input] {raw_input}') chatbot.append((inputs, "")) yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 @@ -237,7 +235,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith error_msg = chunk_decoded chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg) yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面 - print(error_msg) + logger.error(error_msg) return def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg): diff --git a/request_llms/bridge_deepseekcoder.py b/request_llms/bridge_deepseekcoder.py index f8e62e68..9f73375a 100644 --- a/request_llms/bridge_deepseekcoder.py +++ b/request_llms/bridge_deepseekcoder.py @@ -1,12 +1,13 @@ model_name = "deepseek-coder-6.7b-instruct" cmd_to_install = "未知" # "`pip install -r request_llms/requirements_qwen.txt`" -import os from toolbox import ProxyNetworkActivate from toolbox import get_conf -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns +from request_llms.local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from threading import Thread +from loguru import logger import torch +import os def download_huggingface_model(model_name, max_retry, local_dir): from huggingface_hub import snapshot_download @@ -15,7 +16,7 @@ def download_huggingface_model(model_name, max_retry, local_dir): snapshot_download(repo_id=model_name, local_dir=local_dir, resume_download=True) break except Exception as e: - print(f'\n\n下载失败,重试第{i}次中...\n\n') + logger.error(f'\n\n下载失败,重试第{i}次中...\n\n') return local_dir # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model @@ -112,7 +113,6 @@ class GetCoderLMHandle(LocalLLMHandle): generated_text = "" for new_text in self._streamer: generated_text += new_text - # print(generated_text) yield generated_text diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index fb4437a1..4cd12235 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -65,10 +65,10 @@ class GetInternlmHandle(LocalLLMHandle): def llm_stream_generator(self, **kwargs): import torch - import logging import copy import warnings import torch.nn as nn + from loguru import logger as logging from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行 @@ -119,7 +119,7 @@ class GetInternlmHandle(LocalLLMHandle): elif generation_config.max_new_tokens is not None: generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length if not has_default_max_length: - logging.warn( + logging.warning( f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" 
f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " "Please refer to the documentation for more information. " diff --git a/request_llms/bridge_moonshot.py b/request_llms/bridge_moonshot.py index 1f73bd50..e1b3cd48 100644 --- a/request_llms/bridge_moonshot.py +++ b/request_llms/bridge_moonshot.py @@ -5,7 +5,6 @@ import json import os import time -import logging from toolbox import get_conf, update_ui, log_chat from toolbox import ChatBotWithCookies diff --git a/request_llms/bridge_ollama.py b/request_llms/bridge_ollama.py index 96f30503..90744fa6 100644 --- a/request_llms/bridge_ollama.py +++ b/request_llms/bridge_ollama.py @@ -13,11 +13,11 @@ import json import time import gradio as gr -import logging import traceback import requests import importlib import random +from loguru import logger # config_private.py放自己的秘密如API和代理网址 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 @@ -81,7 +81,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", retry += 1 traceback.print_exc() if retry > MAX_RETRY: raise TimeoutError - if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') + if MAX_RETRY!=0: logger.error(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') stream_response = response.iter_lines() result = '' @@ -96,10 +96,10 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", try: if is_last_chunk: # 判定为数据流的结束,gpt_replying_buffer也写完了 - logging.info(f'[response] {result}') + logger.info(f'[response] {result}') break result += chunkjson['message']["content"] - if not console_slience: print(chunkjson['message']["content"], end='') + if not console_slience: logger.info(chunkjson['message']["content"], end='') if observe_window is not None: # 观测窗,把已经获取的数据显示出去 if len(observe_window) >= 1: @@ -112,7 +112,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", chunk = get_full_error(chunk, stream_response) chunk_decoded = chunk.decode() error_msg = chunk_decoded - print(error_msg) + logger.error(error_msg) raise RuntimeError("Json解析不合常规") return result @@ -134,7 +134,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) raw_input = inputs - logging.info(f'[raw_input] {raw_input}') + logger.info(f'[raw_input] {raw_input}') chatbot.append((inputs, "")) yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 @@ -183,7 +183,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp try: if is_last_chunk: # 判定为数据流的结束,gpt_replying_buffer也写完了 - logging.info(f'[response] {gpt_replying_buffer}') + logger.info(f'[response] {gpt_replying_buffer}') break # 处理数据流的主体 try: @@ -202,7 +202,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp error_msg = chunk_decoded chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg) yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面 - print(error_msg) + logger.error(error_msg) return def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg): @@ -265,8 +265,5 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): "messages": messages, "options": options, } - try: - print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........") - except: - print('输入中可能存在乱码。') + return headers,payload diff --git 
a/request_llms/bridge_stackclaude.py b/request_llms/bridge_stackclaude.py index 21590b8a..deed4676 100644 --- a/request_llms/bridge_stackclaude.py +++ b/request_llms/bridge_stackclaude.py @@ -1,12 +1,13 @@ +import time +import asyncio +import threading +import importlib + from .bridge_newbingfree import preprocess_newbing_out, preprocess_newbing_out_simple from multiprocessing import Process, Pipe from toolbox import update_ui, get_conf, trimmed_format_exc -import threading -import importlib -import logging -import time +from loguru import logger as logging from toolbox import get_conf -import asyncio load_message = "正在加载Claude组件,请稍候..." diff --git a/request_llms/bridge_tgui.py b/request_llms/bridge_tgui.py index 8a16f1bf..4be498a3 100644 --- a/request_llms/bridge_tgui.py +++ b/request_llms/bridge_tgui.py @@ -8,7 +8,6 @@ import json import random import string import websockets -import logging import time import threading import importlib diff --git a/request_llms/com_google.py b/request_llms/com_google.py index 9df3a997..88e094f5 100644 --- a/request_llms/com_google.py +++ b/request_llms/com_google.py @@ -218,5 +218,3 @@ class GoogleChatInit: if __name__ == "__main__": google = GoogleChatInit() - # print(gootle.generate_message_payload('你好呀', {}, ['123123', '3123123'], '')) - # gootle.input_encode_handle('123123[123123](./123123), ![53425](./asfafa/fff.jpg)') diff --git a/request_llms/com_qwenapi.py b/request_llms/com_qwenapi.py index a3adad0b..8e037781 100644 --- a/request_llms/com_qwenapi.py +++ b/request_llms/com_qwenapi.py @@ -1,7 +1,6 @@ from http import HTTPStatus from toolbox import get_conf import threading -import logging timeout_bot_msg = '[Local Message] Request timeout. Network error.' diff --git a/request_llms/com_skylark2api.py b/request_llms/com_skylark2api.py index 38875c21..715c01a1 100644 --- a/request_llms/com_skylark2api.py +++ b/request_llms/com_skylark2api.py @@ -1,7 +1,7 @@ -from toolbox import get_conf -import threading -import logging import os +import threading +from toolbox import get_conf +from loguru import logger as logging timeout_bot_msg = '[Local Message] Request timeout. Network error.' #os.environ['VOLC_ACCESSKEY'] = '' diff --git a/request_llms/com_sparkapi.py b/request_llms/com_sparkapi.py index a9196002..100419d5 100644 --- a/request_llms/com_sparkapi.py +++ b/request_llms/com_sparkapi.py @@ -1,17 +1,18 @@ -from toolbox import get_conf, get_pictures_list, encode_image import base64 import datetime import hashlib import hmac import json -from urllib.parse import urlparse import ssl +import websocket +import threading +from toolbox import get_conf, get_pictures_list, encode_image +from loguru import logger +from urllib.parse import urlparse from datetime import datetime from time import mktime from urllib.parse import urlencode from wsgiref.handlers import format_date_time -import websocket -import threading, time timeout_bot_msg = '[Local Message] Request timeout. Network error.' 
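The same pitfall applies to exception values: logger.error("error:", error) would drop the exception for the reason described above, which is why the on_error callback in the next hunk interpolates it. A hedged sketch of the two idiomatic options (the callback signature follows the websocket-client convention used in this file; risky_call is a placeholder):

```python
from loguru import logger

def on_error(ws, error):
    # Interpolate the exception value into the message explicitly.
    logger.error(f"error: {error}")

def worker(risky_call):
    try:
        risky_call()  # placeholder for the real network request
    except Exception:
        # logger.exception() logs at ERROR level and automatically
        # appends the active traceback to the record.
        logger.exception("请求失败")
```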
@@ -104,7 +105,7 @@ class SparkRequestInstance():
         if llm_kwargs['most_recent_uploaded'].get('path'):
             file_manifest = get_pictures_list(llm_kwargs['most_recent_uploaded']['path'])
             if len(file_manifest) > 0:
-                print('正在使用讯飞图片理解API')
+                logger.info('正在使用讯飞图片理解API')
                 gpt_url = self.gpt_url_img
         wsParam = Ws_Param(self.appid, self.api_key, self.api_secret, gpt_url)
         websocket.enableTrace(False)
@@ -123,7 +124,7 @@ class SparkRequestInstance():
             data = json.loads(message)
             code = data['header']['code']
             if code != 0:
-                print(f'请求错误: {code}, {data}')
+                logger.error(f'请求错误: {code}, {data}')
                 self.result_buf += str(data)
                 ws.close()
                 self.time_to_exit_event.set()
@@ -140,7 +141,7 @@ class SparkRequestInstance():
         # 收到websocket错误的处理
         def on_error(ws, error):
-            print("error:", error)
+            logger.error(f"error: {error}")
             self.time_to_exit_event.set()

         # 收到websocket关闭的处理
diff --git a/request_llms/com_taichu.py b/request_llms/com_taichu.py
index f8eb3981..22f8f5eb 100644
--- a/request_llms/com_taichu.py
+++ b/request_llms/com_taichu.py
@@ -4,7 +4,7 @@
 # @Descr   : 兼容最新的智谱Ai
 from toolbox import get_conf
 from toolbox import get_conf, encode_image, get_pictures_list
-import logging, os, requests
+import requests
 import json
 class TaichuChatInit:
     def __init__(self): ...
diff --git a/request_llms/com_zhipuglm.py b/request_llms/com_zhipuglm.py
index 3aa9f210..0935d2c2 100644
--- a/request_llms/com_zhipuglm.py
+++ b/request_llms/com_zhipuglm.py
@@ -5,7 +5,8 @@
 from toolbox import get_conf
 from zhipuai import ZhipuAI
 from toolbox import get_conf, encode_image, get_pictures_list
-import logging, os
+from loguru import logger
+import os

 def input_encode_handler(inputs:str, llm_kwargs:dict):
@@ -24,7 +25,7 @@ class ZhipuChatInit:
     def __init__(self):
         ZHIPUAI_API_KEY, ZHIPUAI_MODEL = get_conf("ZHIPUAI_API_KEY", "ZHIPUAI_MODEL")
         if len(ZHIPUAI_MODEL) > 0:
-            logging.error('ZHIPUAI_MODEL 配置项选项已经弃用,请在LLM_MODEL中配置')
+            logger.error('ZHIPUAI_MODEL 配置项选项已经弃用,请在LLM_MODEL中配置')
         self.zhipu_bro = ZhipuAI(api_key=ZHIPUAI_API_KEY)
         self.model = ''
@@ -37,8 +38,7 @@ class ZhipuChatInit:
         what_i_have_asked['content'].append({"type": 'text', "text": user_input})
         if encode_img:
             if len(encode_img) > 1:
-                logging.warning("glm-4v只支持一张图片,将只取第一张图片进行处理")
-                print("glm-4v只支持一张图片,将只取第一张图片进行处理")
+                logger.warning("glm-4v只支持一张图片,将只取第一张图片进行处理")
             img_d = {"type": "image_url",
                      "image_url": {
                          "url": encode_img[0]['data']
diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py
index 75dd17d1..6c218d7e 100644
--- a/request_llms/local_llm_class.py
+++ b/request_llms/local_llm_class.py
@@ -5,6 +5,7 @@ from toolbox import ChatBotWithCookies
 from multiprocessing import Process, Pipe
 from contextlib import redirect_stdout
 from request_llms.queued_pipe import create_queue_pipe
+from loguru import logger

 class ThreadLock(object):
     def __init__(self):
@@ -51,7 +52,7 @@ def reset_tqdm_output():
         getattr(sys.stdout, 'flush', lambda: None)()

     def fp_write(s):
-        print(s)
+        logger.info(s)

     last_len = [0]
     def print_status(s):
@@ -199,7 +200,7 @@ class LocalLLMHandle(Process):
             if res.startswith(self.std_tag):
                 new_output = res[len(self.std_tag):]
                 std_out = std_out[:std_out_clip_len]
-                print(new_output, end='')
+                logger.info(new_output)
                 std_out = new_output + std_out
                 yield self.std_tag + '\n```\n' + std_out + '\n```\n'
             elif res == '[Finish]':
diff --git a/request_llms/oai_std_model_template.py b/request_llms/oai_std_model_template.py
index 2f8480c4..285ca38d 100644
--- a/request_llms/oai_std_model_template.py
+++ b/request_llms/oai_std_model_template.py
@@ -1,8 
+1,8 @@ import json import time -import logging import traceback import requests +from loguru import logger # config_private.py放自己的秘密如API和代理网址 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 @@ -106,10 +106,7 @@ def generate_message(input, model, key, history, max_output_token, system_prompt "stream": True, "max_tokens": max_output_token, } - try: - print(f" {model} : {conversation_cnt} : {input[:100]} ..........") - except: - print("输入中可能存在乱码。") + return headers, playload @@ -196,7 +193,7 @@ def get_predict_function( if retry > MAX_RETRY: raise TimeoutError if MAX_RETRY != 0: - print(f"请求超时,正在重试 ({retry}/{MAX_RETRY}) ……") + logger.error(f"请求超时,正在重试 ({retry}/{MAX_RETRY}) ……") stream_response = response.iter_lines() result = "" @@ -219,18 +216,17 @@ def get_predict_function( ): chunk = get_full_error(chunk, stream_response) chunk_decoded = chunk.decode() - print(chunk_decoded) + logger.error(chunk_decoded) raise RuntimeError( f"API异常,请检测终端输出。可能的原因是:{finish_reason}" ) if chunk: try: if finish_reason == "stop": - logging.info(f"[response] {result}") + if not console_slience: + logger.info(f"[response] {result}") break result += response_text - if not console_slience: - print(response_text, end="") if observe_window is not None: # 观测窗,把已经获取的数据显示出去 if len(observe_window) >= 1: @@ -243,7 +239,7 @@ def get_predict_function( chunk = get_full_error(chunk, stream_response) chunk_decoded = chunk.decode() error_msg = chunk_decoded - print(error_msg) + logger.error(error_msg) raise RuntimeError("Json解析不合常规") return result @@ -276,7 +272,7 @@ def get_predict_function( inputs, history = handle_core_functionality( additional_fn, inputs, history, chatbot ) - logging.info(f"[raw_input] {inputs}") + logger.info(f"[raw_input] {inputs}") chatbot.append((inputs, "")) yield from update_ui( chatbot=chatbot, history=history, msg="等待响应" @@ -376,11 +372,11 @@ def get_predict_function( history=history, msg="API异常:" + chunk_decoded, ) # 刷新界面 - print(chunk_decoded) + logger.error(chunk_decoded) return if finish_reason == "stop": - logging.info(f"[response] {gpt_replying_buffer}") + logger.info(f"[response] {gpt_replying_buffer}") break status_text = f"finish_reason: {finish_reason}" gpt_replying_buffer += response_text @@ -403,7 +399,7 @@ def get_predict_function( yield from update_ui( chatbot=chatbot, history=history, msg="Json异常" + chunk_decoded ) # 刷新界面 - print(chunk_decoded) + logger.error(chunk_decoded) return return predict_no_ui_long_connection, predict diff --git a/requirements.txt b/requirements.txt index 13c0bb56..36b9697f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,6 +28,7 @@ edge-tts pymupdf openai rjsmin +loguru arxiv numpy rich diff --git a/shared_utils/advanced_markdown_format.py b/shared_utils/advanced_markdown_format.py index 6a42fb65..883c3ffb 100644 --- a/shared_utils/advanced_markdown_format.py +++ b/shared_utils/advanced_markdown_format.py @@ -2,6 +2,8 @@ import markdown import re import os import math + +from loguru import logger from textwrap import dedent from functools import lru_cache from pymdownx.superfences import fence_code_format @@ -227,14 +229,14 @@ def fix_dollar_sticking_bug(txt): if single_stack_height > 0: if txt[:(index+1)].find('\n') > 0 or txt[:(index+1)].find('') > 0 or txt[:(index+1)].find('') > 0: - print('公式之中出现了异常 (Unexpect element in equation)') + logger.error('公式之中出现了异常 (Unexpect element in equation)') single_stack_height = 0 txt_result += ' $' continue if double_stack_height > 0: if txt[:(index+1)].find('\n\n') > 0: - print('公式之中出现了异常 (Unexpect 
diff --git a/shared_utils/advanced_markdown_format.py b/shared_utils/advanced_markdown_format.py
index 6a42fb65..883c3ffb 100644
--- a/shared_utils/advanced_markdown_format.py
+++ b/shared_utils/advanced_markdown_format.py
@@ -2,6 +2,8 @@ import markdown
 import re
 import os
 import math
+
+from loguru import logger
 from textwrap import dedent
 from functools import lru_cache
 from pymdownx.superfences import fence_code_format
@@ -227,14 +229,14 @@ def fix_dollar_sticking_bug(txt):
         if single_stack_height > 0:
             if txt[:(index+1)].find('\n') > 0 or txt[:(index+1)].find('') > 0 or txt[:(index+1)].find('') > 0:
-                print('公式之中出现了异常 (Unexpect element in equation)')
+                logger.error('公式之中出现了异常 (Unexpect element in equation)')
                 single_stack_height = 0
                 txt_result += ' $'
                 continue
         if double_stack_height > 0:
             if txt[:(index+1)].find('\n\n') > 0:
-                print('公式之中出现了异常 (Unexpect element in equation)')
+                logger.error('公式之中出现了异常 (Unexpect element in equation)')
                 double_stack_height = 0
                 txt_result += '$$'
                 continue
@@ -253,13 +255,13 @@ def fix_dollar_sticking_bug(txt):
                 txt = txt[(index+2):]
             else:
                 if double_stack_height != 0:
-                    # print(txt[:(index)])
-                    print('发现异常嵌套公式')
+                    # logger.info(txt[:(index)])
+                    logger.info('发现异常嵌套公式')
                 if single_stack_height == 0:
                     single_stack_height = 1
                 else:
                     single_stack_height = 0
-                    # print(txt[:(index)])
+                    # logger.info(txt[:(index)])
                 txt_result += txt[:(index+1)]
                 txt = txt[(index+1):]
                 break
diff --git a/shared_utils/colorful.py b/shared_utils/colorful.py
index f0414e57..b014c671 100644
--- a/shared_utils/colorful.py
+++ b/shared_utils/colorful.py
@@ -1,5 +1,6 @@
 import platform
 from sys import stdout
+from loguru import logger

 if platform.system()=="Linux":
     pass
@@ -59,3 +60,31 @@ def sprint亮紫(*kw):
     return "\033[1;35m"+' '.join(kw)+"\033[0m"
 def sprint亮靛(*kw):
     return "\033[1;36m"+' '.join(kw)+"\033[0m"
+
+# logger-backed replacements for the print* helpers; depth=1 makes loguru report the
+# caller's location, and map(str, ...) keeps non-str args (ints, dicts) from breaking ' '.join.
+def log红(*kw,**kargs):
+    logger.opt(depth=1).info(sprint红(*map(str, kw)))
+def log绿(*kw,**kargs):
+    logger.opt(depth=1).info(sprint绿(*map(str, kw)))
+def log黄(*kw,**kargs):
+    logger.opt(depth=1).info(sprint黄(*map(str, kw)))
+def log蓝(*kw,**kargs):
+    logger.opt(depth=1).info(sprint蓝(*map(str, kw)))
+def log紫(*kw,**kargs):
+    logger.opt(depth=1).info(sprint紫(*map(str, kw)))
+def log靛(*kw,**kargs):
+    logger.opt(depth=1).info(sprint靛(*map(str, kw)))
+
+def log亮红(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮红(*map(str, kw)))
+def log亮绿(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮绿(*map(str, kw)))
+def log亮黄(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮黄(*map(str, kw)))
+def log亮蓝(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮蓝(*map(str, kw)))
+def log亮紫(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮紫(*map(str, kw)))
+def log亮靛(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮靛(*map(str, kw)))
\ No newline at end of file
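
The `log*` helpers route the old ANSI-colored console output through loguru while reusing the `sprint*` palette. A minimal usage sketch, assuming the helpers are imported exactly as the patch defines them (the sample messages and values are illustrative, not from the patch):

```python
# Illustrative only: sample messages/values are invented.
from shared_utils.colorful import log亮黄, log亮绿

log亮黄("[CONFIG]", "falling back to defaults")               # plain strings, joined with spaces
log亮绿("countdown:", 3)                                      # non-str args are stringified first
log亮绿("proxies:", {"https": "socks5h://localhost:11284"})   # so dicts are safe to pass
```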
") - print亮绿(f"[ENV_VAR] 成功读取环境变量{arg}") + log亮绿(f"[ENV_VAR] 成功读取环境变量{arg}") return r @@ -80,21 +80,21 @@ def read_single_conf_with_lru_cache(arg): if arg == 'API_URL_REDIRECT': oai_rd = r.get("https://api.openai.com/v1/chat/completions", None) # API_URL_REDIRECT填写格式是错误的,请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明` if oai_rd and not oai_rd.endswith('/completions'): - print亮红("\n\n[API_URL_REDIRECT] API_URL_REDIRECT填错了。请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明`。如果您确信自己没填错,无视此消息即可。") + log亮红("\n\n[API_URL_REDIRECT] API_URL_REDIRECT填错了。请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明`。如果您确信自己没填错,无视此消息即可。") time.sleep(5) if arg == 'API_KEY': - print亮蓝(f"[API_KEY] 本项目现已支持OpenAI和Azure的api-key。也支持同时填写多个api-key,如API_KEY=\"openai-key1,openai-key2,azure-key3\"") - print亮蓝(f"[API_KEY] 您既可以在config.py中修改api-key(s),也可以在问题输入区输入临时的api-key(s),然后回车键提交后即可生效。") + log亮蓝(f"[API_KEY] 本项目现已支持OpenAI和Azure的api-key。也支持同时填写多个api-key,如API_KEY=\"openai-key1,openai-key2,azure-key3\"") + log亮蓝(f"[API_KEY] 您既可以在config.py中修改api-key(s),也可以在问题输入区输入临时的api-key(s),然后回车键提交后即可生效。") if is_any_api_key(r): - print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功") + log亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功") else: - print亮红(f"[API_KEY] 您的 API_KEY({r[:15]}***)不满足任何一种已知的密钥格式,请在config文件中修改API密钥之后再运行(详见`https://github.com/binary-husky/gpt_academic/wiki/api_key`)。") + log亮红(f"[API_KEY] 您的 API_KEY({r[:15]}***)不满足任何一种已知的密钥格式,请在config文件中修改API密钥之后再运行(详见`https://github.com/binary-husky/gpt_academic/wiki/api_key`)。") if arg == 'proxies': if not read_single_conf_with_lru_cache('USE_PROXY'): r = None # 检查USE_PROXY,防止proxies单独起作用 if r is None: - print亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问OpenAI家族的模型。建议:检查USE_PROXY选项是否修改。') + log亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问OpenAI家族的模型。建议:检查USE_PROXY选项是否修改。') else: - print亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r) + log亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r) assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。' return r diff --git a/shared_utils/handle_upload.py b/shared_utils/handle_upload.py index 6ecc0cdd..4e926b60 100644 --- a/shared_utils/handle_upload.py +++ b/shared_utils/handle_upload.py @@ -8,6 +8,7 @@ import gradio import shutil import glob from shared_utils.config_loader import get_conf +from loguru import logger def html_local_file(file): base_path = os.path.dirname(__file__) # 项目目录 @@ -100,7 +101,7 @@ def extract_archive(file_path, dest_dir): with zipfile.ZipFile(file_path, "r") as zipobj: zipobj._extract_member = lambda a,b,c: zip_extract_member_new(zipobj, a,b,c) # 修复中文乱码的问题 zipobj.extractall(path=dest_dir) - print("Successfully extracted zip archive to {}".format(dest_dir)) + logger.info("Successfully extracted zip archive to {}".format(dest_dir)) elif file_extension in [".tar", ".gz", ".bz2"]: with tarfile.open(file_path, "r:*") as tarobj: @@ -113,7 +114,7 @@ def extract_archive(file_path, dest_dir): raise Exception(f"Attempted Path Traversal in {member.name}") tarobj.extractall(path=dest_dir) - print("Successfully extracted tar archive to {}".format(dest_dir)) + logger.info("Successfully extracted tar archive to {}".format(dest_dir)) # 第三方库,需要预先pip install rarfile # 此外,Windows上还需要安装winrar软件,配置其Path环境变量,如"C:\Program Files\WinRAR"才可以 @@ -123,9 +124,9 @@ def extract_archive(file_path, dest_dir): with rarfile.RarFile(file_path) as rf: rf.extractall(path=dest_dir) - print("Successfully extracted rar archive to {}".format(dest_dir)) + logger.info("Successfully extracted rar archive to {}".format(dest_dir)) except: - print("Rar 
diff --git a/shared_utils/logging.py b/shared_utils/logging.py
new file mode 100644
index 00000000..d8a4bd69
--- /dev/null
+++ b/shared_utils/logging.py
@@ -0,0 +1,71 @@
+from loguru import logger
+import logging
+import sys
+import os
+
+# Split sinks: chat transcripts (records bound with chat_msg=True) go to
+# chat_secrets.log; everything else goes to stderr and console_log.log.
+def chat_log_filter(record):
+    return "chat_msg" in record["extra"]
+
+def not_chat_log_filter(record):
+    return "chat_msg" not in record["extra"]
+
+def formatter_with_clip(record):
+    # Note this function returns the string to be formatted, not the actual message to be logged
+    max_len = 12
+    record['function_x'] = record['function'].center(max_len)
+    if len(record['function_x']) > max_len:
+        # clip overlong function names so the console columns stay aligned
+        record['function_x'] = ".." + record['function_x'][-(max_len-2):]
+    record['line_x'] = str(record['line']).ljust(3)
+    return '{time:HH:mm} | {function_x}:{line_x} | {message}\n'
+
+def setup_logging(PATH_LOGGING):
+
+    admin_log_path = os.path.join(PATH_LOGGING, "admin")
+    os.makedirs(admin_log_path, exist_ok=True)
+    sensitive_log_path = os.path.join(admin_log_path, "chat_secrets.log")
+    regular_log_path = os.path.join(admin_log_path, "console_log.log")
+    logger.remove()
+    logger.configure(
+        levels=[dict(name="WARNING", color="")],
+    )
+
+    logger.add(
+        sys.stderr,
+        format=formatter_with_clip,
+        # format='{time:HH:mm} | {function}:{line} - {message}',
+        filter=not_chat_log_filter,
+        colorize=True,
+        enqueue=True
+    )
+
+    logger.add(
+        sensitive_log_path,
+        format='{time:MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}',
+        rotation="10 MB",
+        filter=chat_log_filter,
+        enqueue=True,
+    )
+
+    logger.add(
+        regular_log_path,
+        format='{time:MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}',
+        rotation="10 MB",
+        filter=not_chat_log_filter,
+        enqueue=True,
+    )
+
+    logging.getLogger("httpx").setLevel(logging.WARNING)
+
+    logger.warning(f"所有对话记录将自动保存在本地目录{sensitive_log_path}, 请注意自我隐私保护哦!")
+
+
+# logger.bind(chat_msg=True).info("This message is logged to the file!")
+# logger.debug(f"debug message")
+# logger.info(f"info message")
+# logger.success(f"success message")
+# logger.error(f"error message")
+# logger.add("special.log", filter=lambda record: "special" in record["extra"])
+# logger.debug("This message is not logged to the file")
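
The new module fans records out to three sinks keyed on loguru's `extra` dict: records created through `logger.bind(chat_msg=True)` match `chat_log_filter` and land only in `chat_secrets.log`, while everything else goes to stderr (with the clipped function-name format) and to `console_log.log`. A minimal sketch of the intended startup wiring, assuming the module is importable as `shared_utils.logging`; the `"logs"` path below is illustrative:

```python
# Sketch: wire the sinks once at startup, then route records by binding.
from loguru import logger
from shared_utils.logging import setup_logging

setup_logging("logs")  # creates logs/admin/chat_secrets.log and logs/admin/console_log.log
logger.info("boot complete")                      # -> stderr + console_log.log
logger.bind(chat_msg=True).info("Q: ... A: ...")  # -> chat_secrets.log only
```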
diff --git a/themes/contrast.py b/themes/contrast.py
index 1e988377..faa87f84 100644
--- a/themes/contrast.py
+++ b/themes/contrast.py
@@ -1,6 +1,7 @@
 import os
 import gradio as gr
 from toolbox import get_conf
+from loguru import logger

 CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 theme_dir = os.path.dirname(__file__)
@@ -85,7 +86,7 @@ def adjust_theme():
         )
     except:
         set_theme = None
-        print("gradio版本较旧, 不能自定义字体和颜色")
+        logger.error("gradio版本较旧, 不能自定义字体和颜色")
     return set_theme
diff --git a/themes/default.py b/themes/default.py
index a65b0119..1542cb11 100644
--- a/themes/default.py
+++ b/themes/default.py
@@ -1,6 +1,7 @@
 import os
 import gradio as gr
 from toolbox import get_conf
+from loguru import logger

 CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 theme_dir = os.path.dirname(__file__)
@@ -84,7 +85,7 @@ def adjust_theme():
         )
     except:
         set_theme = None
-        print("gradio版本较旧, 不能自定义字体和颜色")
+        logger.error("gradio版本较旧, 不能自定义字体和颜色")
     return set_theme
diff --git a/themes/gradios.py b/themes/gradios.py
index d72e7e8d..f4dc43b4 100644
--- a/themes/gradios.py
+++ b/themes/gradios.py
@@ -1,6 +1,7 @@
 import os
 import gradio as gr
 from toolbox import get_conf, ProxyNetworkActivate
+from loguru import logger

 CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 theme_dir = os.path.dirname(__file__)
@@ -9,7 +10,7 @@ theme_dir = os.path.dirname(__file__)
 def dynamic_set_theme(THEME):
     set_theme = gr.themes.ThemeClass()
     with ProxyNetworkActivate("Download_Gradio_Theme"):
-        print("正在下载Gradio主题,请稍等。")
+        logger.info("正在下载Gradio主题,请稍等。")
         try:
             if THEME.startswith("Huggingface-"):
                 THEME = THEME.lstrip("Huggingface-")
@@ -17,7 +18,7 @@ def dynamic_set_theme(THEME):
                 THEME = THEME.lstrip("huggingface-")
             set_theme = set_theme.from_hub(THEME.lower())
         except:
-            print("下载Gradio主题时出现异常。")
+            logger.error("下载Gradio主题时出现异常。")
     return set_theme

@@ -25,7 +26,7 @@ def adjust_theme():
     try:
         set_theme = gr.themes.ThemeClass()
         with ProxyNetworkActivate("Download_Gradio_Theme"):
-            print("正在下载Gradio主题,请稍等。")
+            logger.info("正在下载Gradio主题,请稍等。")
             try:
                 THEME = get_conf("THEME")
                 if THEME.startswith("Huggingface-"):
@@ -34,7 +35,7 @@ def adjust_theme():
                     THEME = THEME.lstrip("huggingface-")
                 set_theme = set_theme.from_hub(THEME.lower())
             except:
-                print("下载Gradio主题时出现异常。")
+                logger.error("下载Gradio主题时出现异常。")

         from themes.common import get_common_html_javascript_code
         js = get_common_html_javascript_code()
@@ -54,7 +55,7 @@ def adjust_theme():
         )
     except Exception:
         set_theme = None
-        print("gradio版本较旧, 不能自定义字体和颜色。")
+        logger.error("gradio版本较旧, 不能自定义字体和颜色。")
     return set_theme
diff --git a/themes/green.py b/themes/green.py
index bd1179a3..ba681ae0 100644
--- a/themes/green.py
+++ b/themes/green.py
@@ -1,6 +1,7 @@
 import os
 import gradio as gr
 from toolbox import get_conf
+from loguru import logger

 CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 theme_dir = os.path.dirname(__file__)
@@ -97,7 +98,7 @@ def adjust_theme():
         )
     except:
         set_theme = None
-        print("gradio版本较旧, 不能自定义字体和颜色")
+        logger.error("gradio版本较旧, 不能自定义字体和颜色")
     return set_theme
url".format( path ) @@ -787,12 +787,12 @@ def zip_folder(source_folder, dest_folder, zip_name): # Make sure the source folder exists if not os.path.exists(source_folder): - print(f"{source_folder} does not exist") + logger.info(f"{source_folder} does not exist") return # Make sure the destination folder exists if not os.path.exists(dest_folder): - print(f"{dest_folder} does not exist") + logger.info(f"{dest_folder} does not exist") return # Create the name for the zip file @@ -811,7 +811,7 @@ def zip_folder(source_folder, dest_folder, zip_name): os.rename(zip_file, pj(dest_folder, os.path.basename(zip_file))) zip_file = pj(dest_folder, os.path.basename(zip_file)) - print(f"Zip file created at {zip_file}") + logger.info(f"Zip file created at {zip_file}") def zip_result(folder): @@ -1033,10 +1033,20 @@ def log_chat(llm_model: str, input_str: str, output_str: str): try: if output_str and input_str and llm_model: uid = str(uuid.uuid4().hex) - logging.info(f"[Model({uid})] {llm_model}") input_str = input_str.rstrip('\n') - logging.info(f"[Query({uid})]\n{input_str}") output_str = output_str.rstrip('\n') - logging.info(f"[Response({uid})]\n{output_str}\n\n") + logger.bind(chat_msg=True).info(dedent( + """ + ╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ + [UID] + {uid} + [Model] + {llm_model} + [Query] + {input_str} + [Response] + {output_str} + ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + """).format(uid=uid, llm_model=llm_model, input_str=input_str, output_str=output_str)) except: - print(trimmed_format_exc()) + logger.error(trimmed_format_exc())