Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-05 22:16:49 +00:00

logging -> loguru: stage 3
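This stage follows the two migration patterns visible in the hunks below: modules whose call sites are rewritten import loguru's singleton `logger` directly, while modules whose call sites are left untouched import it under the old `logging` name. A minimal sketch of both patterns (the messages are illustrative, not taken from the diff):

```python
# Pattern 1: import loguru's logger and rewrite call sites (logging.info -> logger.info).
from loguru import logger

logger.info('Segmentation: done')    # was: logging.info('Segmentation: done')
logger.error('extraction failed')    # was: logging.error(...)

# Pattern 2: alias loguru's logger under the old module name, so existing call sites
# such as logging.info(...) keep working without edits.
from loguru import logger as logging

logging.info('connection established')
logging.warning('falling back to default')  # loguru exposes .warning() but no .warn(),
                                            # hence the logging.warn -> logging.warning fix below
```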
@@ -1,4 +1,5 @@
-import glob, shutil, os, re, logging
+import glob, shutil, os, re
+from loguru import logger
 from toolbox import update_ui, trimmed_format_exc, gen_time_str
 from toolbox import CatchException, report_exception, get_log_folder
 from toolbox import write_history_to_file, promote_file_to_downloadzone
@@ -34,7 +35,7 @@ class PaperFileGroup():
 self.sp_file_contents.append(segment)
 self.sp_file_index.append(index)
 self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md")
-logging.info('Segmentation: done')
+logger.info('Segmentation: done')

 def merge_result(self):
 self.file_result = ["" for _ in range(len(self.file_paths))]
@@ -106,7 +107,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
 expected_f_name = plugin_kwargs['markdown_expected_output_path']
 shutil.copyfile(output_file, expected_f_name)
 except:
-logging.error(trimmed_format_exc())
+logger.error(trimmed_format_exc())

 # <-------- 整理结果,退出 ---------->
 create_report_file_name = gen_time_str() + f"-chatgpt.md"
@@ -126,7 +127,7 @@ def get_files_from_everything(txt, preference=''):
 proxies = get_conf('proxies')
 # 网络的远程文件
 if preference == 'Github':
-logging.info('正在从github下载资源 ...')
+logger.info('正在从github下载资源 ...')
 if not txt.endswith('.md'):
 # Make a request to the GitHub API to retrieve the repository information
 url = txt.replace("https://github.com/", "https://api.github.com/repos/") + '/readme'
@@ -1,5 +1,4 @@
 import os
-import logging
 import threading
 from loguru import logger
 from shared_utils.char_visual_effect import scolling_visual_effect
@@ -596,7 +595,7 @@ class nougat_interface():
 def nougat_with_timeout(self, command, cwd, timeout=3600):
 import subprocess
 from toolbox import ProxyNetworkActivate
-logging.info(f'正在执行命令 {command}')
+logger.info(f'正在执行命令 {command}')
 with ProxyNetworkActivate("Nougat_Download"):
 process = subprocess.Popen(command, shell=False, cwd=cwd, env=os.environ)
 try:
@@ -24,8 +24,8 @@ class Actor(BaseModel):
 film_names: List[str] = Field(description="list of names of films they starred in")
 """

-import json, re, logging
+import json, re
+from loguru import logger as logging

 PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
@@ -1,5 +1,6 @@
-import time, logging, json, sys, struct
+import time, json, sys, struct
 import numpy as np
+from loguru import logger as logging
 from scipy.io.wavfile import WAVE_FORMAT

 def write_numpy_to_wave(filename, rate, data, add_header=False):
@@ -12,7 +12,6 @@ import json
 import os
 import re
 import time
-import logging
 import traceback
 import requests
 import random
@@ -317,7 +316,6 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
 # 前者是API2D的结束条件,后者是OPENAI的结束条件
 if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
 # 判定为数据流的结束,gpt_replying_buffer也写完了
-# logging.info(f'[response] {gpt_replying_buffer}')
 log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
 break
 # 处理数据流的主体
@@ -486,7 +484,6 @@ def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:st
 "gpt-3.5-turbo-16k-0613",
 "gpt-3.5-turbo-0301",
 ])
-logging.info("Random select model:" + model)

 payload = {
 "model": model,
@@ -8,15 +8,14 @@
 2. predict_no_ui_long_connection:支持多线程
 """

 import os
 import json
 import time
-import logging
 import requests
 import base64
 import os
 import glob
 from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, \
-update_ui_lastest_msg, get_max_token, encode_image, have_any_recent_upload_image_files
+update_ui_lastest_msg, get_max_token, encode_image, have_any_recent_upload_image_files, log_chat


 proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
@@ -100,7 +99,6 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

 raw_input = inputs
-logging.info(f'[raw_input] {raw_input}')
 def make_media_input(inputs, image_paths):
 for image_path in image_paths:
 inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
@@ -185,7 +183,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 # 判定为数据流的结束,gpt_replying_buffer也写完了
 lastmsg = chatbot[-1][-1] + f"\n\n\n\n「{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」"
 yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1)
-logging.info(f'[response] {gpt_replying_buffer}')
+log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
 break
 # 处理数据流的主体
 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -13,10 +13,10 @@
 import json
 import time
 import gradio as gr
-import logging
 import traceback
 import requests
 import importlib
+from loguru import logger as logging

 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
@@ -9,13 +9,14 @@
 具备多线程调用能力的函数
 2. predict_no_ui_long_connection:支持多线程
 """
-import logging
 import os
 import time
 import traceback
 import json
 import requests
+from loguru import logger as logging
 from toolbox import get_conf, update_ui, trimmed_format_exc, encode_image, every_image_file_in_path, log_chat

 picture_system_prompt = "\n当回复图像时,必须说明正在回复哪张图像。所有图像仅在最后一个问题中提供,即使它们在历史记录中被提及。请使用'这是第X张图像:'的格式来指明您正在描述的是哪张图像。"
 Claude_3_Models = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229", "claude-3-5-sonnet-20240620"]
@@ -13,11 +13,9 @@
 import json
 import time
 import gradio as gr
-import logging
 import traceback
 import requests
 import importlib
 import random
 from loguru import logger as logging

 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
@@ -65,10 +65,10 @@ class GetInternlmHandle(LocalLLMHandle):

 def llm_stream_generator(self, **kwargs):
 import torch
-import logging
 import copy
 import warnings
 import torch.nn as nn
+from loguru import logger as logging
 from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig

 # 🏃♂️🏃♂️🏃♂️ 子进程执行
@@ -119,7 +119,7 @@ class GetInternlmHandle(LocalLLMHandle):
 elif generation_config.max_new_tokens is not None:
 generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
 if not has_default_max_length:
-logging.warn(
+logging.warning(
 f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
 f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
 "Please refer to the documentation for more information. "
@@ -5,7 +5,6 @@
 import json
 import os
 import time
-import logging

 from toolbox import get_conf, update_ui, log_chat
 from toolbox import ChatBotWithCookies
@@ -13,11 +13,11 @@
 import json
 import time
 import gradio as gr
-import logging
 import traceback
 import requests
 import importlib
 import random
+from loguru import logger as logging

 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
@@ -1,12 +1,13 @@
+import time
+import asyncio
 import threading
 import importlib

 from .bridge_newbingfree import preprocess_newbing_out, preprocess_newbing_out_simple
 from multiprocessing import Process, Pipe
 from toolbox import update_ui, get_conf, trimmed_format_exc
-import threading
-import importlib
-import logging
-import time
+from loguru import logger as logging
 from toolbox import get_conf
-import asyncio

 load_message = "正在加载Claude组件,请稍候..."
@@ -8,7 +8,6 @@ import json
 import random
 import string
 import websockets
-import logging
 import time
 import threading
 import importlib
@@ -1,7 +1,6 @@
 from http import HTTPStatus
 from toolbox import get_conf
 import threading
-import logging

 timeout_bot_msg = '[Local Message] Request timeout. Network error.'
@@ -1,7 +1,7 @@
-from toolbox import get_conf
-import threading
-import logging
 import os
+import threading
+from toolbox import get_conf
+from loguru import logger as logging

 timeout_bot_msg = '[Local Message] Request timeout. Network error.'
 #os.environ['VOLC_ACCESSKEY'] = ''
@@ -4,7 +4,7 @@
 # @Descr : 兼容最新的智谱Ai
-from toolbox import get_conf
+from toolbox import get_conf, encode_image, get_pictures_list
-import logging, os, requests
+import requests
 import json
 class TaichuChatInit:
 def __init__(self): ...
@@ -1,8 +1,8 @@
 import json
 import time
-import logging
 import traceback
 import requests
+from loguru import logger as logging

 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
@@ -2,6 +2,8 @@ import markdown
 import re
 import os
 import math

+from loguru import logger
 from textwrap import dedent
 from functools import lru_cache
 from pymdownx.superfences import fence_code_format
@@ -227,14 +229,14 @@ def fix_dollar_sticking_bug(txt):

 if single_stack_height > 0:
 if txt[:(index+1)].find('\n') > 0 or txt[:(index+1)].find('<td>') > 0 or txt[:(index+1)].find('</td>') > 0:
-print('公式之中出现了异常 (Unexpect element in equation)')
+logger.error('公式之中出现了异常 (Unexpect element in equation)')
 single_stack_height = 0
 txt_result += ' $'
 continue

 if double_stack_height > 0:
 if txt[:(index+1)].find('\n\n') > 0:
-print('公式之中出现了异常 (Unexpect element in equation)')
+logger.error('公式之中出现了异常 (Unexpect element in equation)')
 double_stack_height = 0
 txt_result += '$$'
 continue
@@ -253,13 +255,13 @@ def fix_dollar_sticking_bug(txt):
 txt = txt[(index+2):]
 else:
 if double_stack_height != 0:
-# print(txt[:(index)])
-print('发现异常嵌套公式')
+# logger.info(txt[:(index)])
+logger.info('发现异常嵌套公式')
 if single_stack_height == 0:
 single_stack_height = 1
 else:
 single_stack_height = 0
-# print(txt[:(index)])
+# logger.info(txt[:(index)])
 txt_result += txt[:(index+1)]
 txt = txt[(index+1):]
 break
@@ -30,13 +30,13 @@ def read_env_variable(arg, default_value):
 env_arg = os.environ[arg]
 else:
 raise KeyError
-print(f"[ENV_VAR] 尝试加载{arg},默认值:{default_value} --> 修正值:{env_arg}")
+log亮绿(f"[ENV_VAR] 尝试加载{arg},默认值:{default_value} --> 修正值:{env_arg}")
 try:
 if isinstance(default_value, bool):
 env_arg = env_arg.strip()
 if env_arg == 'True': r = True
 elif env_arg == 'False': r = False
-else: print('Enter True or False, but have:', env_arg); r = default_value
+else: log亮红('Expect `True` or `False`, but have:', env_arg); r = default_value
 elif isinstance(default_value, int):
 r = int(env_arg)
 elif isinstance(default_value, float):
@@ -8,6 +8,7 @@ import gradio
 import shutil
 import glob
 from shared_utils.config_loader import get_conf
+from loguru import logger

 def html_local_file(file):
 base_path = os.path.dirname(__file__) # 项目目录
@@ -100,7 +101,7 @@ def extract_archive(file_path, dest_dir):
 with zipfile.ZipFile(file_path, "r") as zipobj:
 zipobj._extract_member = lambda a,b,c: zip_extract_member_new(zipobj, a,b,c) # 修复中文乱码的问题
 zipobj.extractall(path=dest_dir)
-print("Successfully extracted zip archive to {}".format(dest_dir))
+logger.info("Successfully extracted zip archive to {}".format(dest_dir))

 elif file_extension in [".tar", ".gz", ".bz2"]:
 with tarfile.open(file_path, "r:*") as tarobj:
@@ -113,7 +114,7 @@ def extract_archive(file_path, dest_dir):
 raise Exception(f"Attempted Path Traversal in {member.name}")

 tarobj.extractall(path=dest_dir)
-print("Successfully extracted tar archive to {}".format(dest_dir))
+logger.info("Successfully extracted tar archive to {}".format(dest_dir))

 # 第三方库,需要预先pip install rarfile
 # 此外,Windows上还需要安装winrar软件,配置其Path环境变量,如"C:\Program Files\WinRAR"才可以
@@ -123,9 +124,9 @@ def extract_archive(file_path, dest_dir):

 with rarfile.RarFile(file_path) as rf:
 rf.extractall(path=dest_dir)
-print("Successfully extracted rar archive to {}".format(dest_dir))
+logger.info("Successfully extracted rar archive to {}".format(dest_dir))
 except:
-print("Rar format requires additional dependencies to install")
+logger.info("Rar format requires additional dependencies to install")
 return "\n\n解压失败! 需要安装pip install rarfile来解压rar文件。建议:使用zip压缩格式。"

 # 第三方库,需要预先pip install py7zr
@@ -135,9 +136,9 @@ def extract_archive(file_path, dest_dir):

 with py7zr.SevenZipFile(file_path, mode="r") as f:
 f.extractall(path=dest_dir)
-print("Successfully extracted 7z archive to {}".format(dest_dir))
+logger.info("Successfully extracted 7z archive to {}".format(dest_dir))
 except:
-print("7z format requires additional dependencies to install")
+logger.info("7z format requires additional dependencies to install")
 return "\n\n解压失败! 需要安装pip install py7zr来解压7z文件"
 else:
 return ""
@@ -59,11 +59,8 @@ def setup_logging(PATH_LOGGING):

 logger.warning(f"所有对话记录将自动保存在本地目录{sensitive_log_path}, 请注意自我隐私保护哦!")

-logger.bind(chat_msg=True).info("This message is logged to the file!")
+# logger.bind(chat_msg=True).info("This message is logged to the file!")
 # logger.debug(f"debug message")
 # logger.info(f"info message")
 # logger.success(f"success message")
@@ -1,6 +1,7 @@
 import os
 import gradio as gr
 from toolbox import get_conf
+from loguru import logger

 CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 theme_dir = os.path.dirname(__file__)
@@ -85,7 +86,7 @@ def adjust_theme():
 )
 except:
 set_theme = None
-print("gradio版本较旧, 不能自定义字体和颜色")
+logger.error("gradio版本较旧, 不能自定义字体和颜色")
 return set_theme
@@ -1,6 +1,7 @@
 import os
 import gradio as gr
 from toolbox import get_conf
+from loguru import logger

 CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 theme_dir = os.path.dirname(__file__)
@@ -84,7 +85,7 @@ def adjust_theme():
 )
 except:
 set_theme = None
-print("gradio版本较旧, 不能自定义字体和颜色")
+logger.error("gradio版本较旧, 不能自定义字体和颜色")
 return set_theme
@@ -1,6 +1,7 @@
 import os
 import gradio as gr
 from toolbox import get_conf, ProxyNetworkActivate
+from loguru import logger

 CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 theme_dir = os.path.dirname(__file__)
@@ -9,7 +10,7 @@ theme_dir = os.path.dirname(__file__)
 def dynamic_set_theme(THEME):
 set_theme = gr.themes.ThemeClass()
 with ProxyNetworkActivate("Download_Gradio_Theme"):
-print("正在下载Gradio主题,请稍等。")
+logger.info("正在下载Gradio主题,请稍等。")
 try:
 if THEME.startswith("Huggingface-"):
 THEME = THEME.lstrip("Huggingface-")
@@ -17,7 +18,7 @@ def dynamic_set_theme(THEME):
 THEME = THEME.lstrip("huggingface-")
 set_theme = set_theme.from_hub(THEME.lower())
 except:
-print("下载Gradio主题时出现异常。")
+logger.error("下载Gradio主题时出现异常。")
 return set_theme

@@ -25,7 +26,7 @@ def adjust_theme():
 try:
 set_theme = gr.themes.ThemeClass()
 with ProxyNetworkActivate("Download_Gradio_Theme"):
-print("正在下载Gradio主题,请稍等。")
+logger.info("正在下载Gradio主题,请稍等。")
 try:
 THEME = get_conf("THEME")
 if THEME.startswith("Huggingface-"):
@@ -34,7 +35,7 @@ def adjust_theme():
 THEME = THEME.lstrip("huggingface-")
 set_theme = set_theme.from_hub(THEME.lower())
 except:
-print("下载Gradio主题时出现异常。")
+logger.error("下载Gradio主题时出现异常。")

 from themes.common import get_common_html_javascript_code
 js = get_common_html_javascript_code()
@@ -54,7 +55,7 @@ def adjust_theme():
 )
 except Exception:
 set_theme = None
-print("gradio版本较旧, 不能自定义字体和颜色。")
+logger.error("gradio版本较旧, 不能自定义字体和颜色。")
 return set_theme
@@ -1,6 +1,7 @@
 import os
 import gradio as gr
 from toolbox import get_conf
+from loguru import logger

 CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 theme_dir = os.path.dirname(__file__)
@@ -97,7 +98,7 @@ def adjust_theme():
 )
 except:
 set_theme = None
-print("gradio版本较旧, 不能自定义字体和颜色")
+logger.error("gradio版本较旧, 不能自定义字体和颜色")
 return set_theme
toolbox.py
@@ -8,7 +8,6 @@ import base64
 import gradio
 import shutil
 import glob
-import logging
 import uuid
 from loguru import logger
 from functools import wraps
@@ -1034,10 +1033,20 @@ def log_chat(llm_model: str, input_str: str, output_str: str):
 try:
 if output_str and input_str and llm_model:
 uid = str(uuid.uuid4().hex)
-logging.info(f"[Model({uid})] {llm_model}")
 input_str = input_str.rstrip('\n')
-logging.info(f"[Query({uid})]\n{input_str}")
 output_str = output_str.rstrip('\n')
-logging.info(f"[Response({uid})]\n{output_str}\n\n")
+logger.bind(chat_msg=True).info(dedent(
+"""
+╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+[UID]
+{uid}
+[Model]
+{llm_model}
+[Query]
+{input_str}
+[Response]
+{output_str}
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+""").format(uid=uid, llm_model=llm_model, input_str=input_str, output_str=output_str))
 except:
 logger.error(trimmed_format_exc())
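The `logger.bind(chat_msg=True)` call above tags each chat record through loguru's `extra` dict, so a filter on a dedicated sink can separate chat transcripts from ordinary application logs. A minimal sketch of that wiring, assuming a filter-based sink along the lines of what `setup_logging()` configures (the file paths are hypothetical):

```python
from loguru import logger

# Hypothetical chat-transcript sink: only records bound with chat_msg=True land here.
logger.add(
    "gpt_log/chat_history.log",                                     # illustrative path
    format="{time:YYYY-MM-DD HH:mm:ss} {message}",
    filter=lambda record: record["extra"].get("chat_msg", False),
)

# Hypothetical general sink: everything that is not a chat record.
logger.add(
    "gpt_log/app.log",                                              # illustrative path
    filter=lambda record: not record["extra"].get("chat_msg", False),
)

logger.bind(chat_msg=True).info("[Query] hello")   # routed to the chat-transcript sink only
logger.info("ordinary status message")             # routed to the general sink only
```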