Mirrored from https://github.com/binary-husky/gpt_academic.git (synced 2025-12-06 06:26:47 +00:00)
logging sys to loguru: stage 1 complete
check_proxy.py

@@ -1,3 +1,4 @@
+from loguru import logger

 def check_proxy(proxies, return_ip=False):
     import requests
@@ -19,14 +20,14 @@ def check_proxy(proxies, return_ip=False):
         else:
             result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}"
         if not return_ip:
-            print(result)
+            logger.warning(result)
             return result
         else:
             return ip
     except:
         result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
         if not return_ip:
-            print(result)
+            logger.warning(result)
             return result
         else:
             return ip
@@ -148,9 +149,9 @@ def auto_update(raise_error=False):
             if raise_error:
                 from toolbox import trimmed_format_exc
                 msg += trimmed_format_exc()
-            print(msg)
+            logger.warning(msg)
         else:
-            print('自动更新程序:已禁用')
+            logger.info('自动更新程序:已禁用')
             return
     else:
         return
@@ -159,10 +160,10 @@ def auto_update(raise_error=False):
         if raise_error:
             from toolbox import trimmed_format_exc
             msg += trimmed_format_exc()
-        print(msg)
+        logger.info(msg)

 def warm_up_modules():
-    print('正在执行一些模块的预热 ...')
+    logger.info('正在执行一些模块的预热 ...')
     from toolbox import ProxyNetworkActivate
     from request_llms.bridge_all import model_info
     with ProxyNetworkActivate("Warmup_Modules"):
@@ -172,7 +173,7 @@ def warm_up_modules():
         enc.encode("模块预热", disallowed_special=())

 def warm_up_vectordb():
-    print('正在执行一些模块的预热 ...')
+    logger.info('正在执行一些模块的预热 ...')
     from toolbox import ProxyNetworkActivate
     with ProxyNetworkActivate("Warmup_Modules"):
         import nltk
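
Note: the pattern above is mechanical — each print() becomes a logger call whose level encodes the message's intent: recoverable failures (bad proxy, disabled auto-update) go to WARNING, plain status lines go to INFO. A minimal sketch of that mapping; check_something is an illustrative name, not from the repo:

from loguru import logger

def check_something(ok: bool):
    if ok:
        # routine progress, formerly print(...)
        logger.info("check passed")
    else:
        # a failure the program survives
        logger.warning("check failed, falling back to defaults")
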
main.py  (29 lines changed)
@@ -13,16 +13,10 @@ help_menu_description = \
     </br></br>如何语音对话: 请阅读Wiki
     </br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""

+from loguru import logger
 def enable_log(PATH_LOGGING):
-    import logging
-    admin_log_path = os.path.join(PATH_LOGGING, "admin")
-    os.makedirs(admin_log_path, exist_ok=True)
-    log_dir = os.path.join(admin_log_path, "chat_secrets.log")
-    try:logging.basicConfig(filename=log_dir, level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
-    except:logging.basicConfig(filename=log_dir, level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
-    # Disable logging output from the 'httpx' logger
-    logging.getLogger("httpx").setLevel(logging.WARNING)
-    print(f"所有对话记录将自动保存在本地目录{log_dir}, 请注意自我隐私保护哦!")
+    from shared_utils.logging import setup_logging
+    setup_logging(PATH_LOGGING)

 def encode_plugin_info(k, plugin)->str:
     import copy
@@ -42,9 +36,16 @@ def main():
     import gradio as gr
     if gr.__version__ not in ['3.32.9', '3.32.10', '3.32.11']:
         raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
-    from request_llms.bridge_all import predict

     # 一些基础工具
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
+
+    # 对话、日志记录
+    enable_log(get_conf("PATH_LOGGING"))
+
+    # 对话句柄
+    from request_llms.bridge_all import predict

     # 读取配置
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
     CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
@@ -61,8 +62,6 @@ def main():
     from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
     title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"

-    # 对话、日志记录
-    enable_log(PATH_LOGGING)
-
     # 一些普通功能模块
     from core_functional import get_core_functions
@@ -339,9 +338,9 @@ def main():
     # Gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
     def run_delayed_tasks():
         import threading, webbrowser, time
-        print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
-        if DARK_MODE: print(f"\t「暗色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")
-        else: print(f"\t「亮色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")
+        logger.info(f"如果浏览器没有自动打开,请复制并转到以下URL:")
+        if DARK_MODE: logger.info(f"\t「暗色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")
+        else: logger.info(f"\t「亮色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")

         def auto_updates(): time.sleep(0); auto_update()
         def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
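
Note: the enable_log call moves ahead of the bridge_all import, a plausible reading being that log sinks must exist before importing modules whose top-level code logs. A minimal sketch of that startup ordering; "gpt_log" is an illustrative PATH_LOGGING value:

# configure sinks first, then import modules that log at import time
from shared_utils.logging import setup_logging

setup_logging("gpt_log")

# import-time logger.* calls in bridge_all now reach the configured sinks
from request_llms.bridge_all import predict
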
request_llms/bridge_all.py

@@ -9,6 +9,7 @@
     2. predict_no_ui_long_connection(...)
 """
 import tiktoken, copy, re
+from loguru import logger
 from functools import lru_cache
 from concurrent.futures import ThreadPoolExecutor
 from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask, read_one_api_model_name
@@ -51,9 +52,9 @@ class LazyloadTiktoken(object):
     @staticmethod
     @lru_cache(maxsize=128)
     def get_encoder(model):
-        print('正在加载tokenizer,如果是第一次运行,可能需要一点时间下载参数')
+        logger.info('正在加载tokenizer,如果是第一次运行,可能需要一点时间下载参数')
         tmp = tiktoken.encoding_for_model(model)
-        print('加载tokenizer完毕')
+        logger.info('加载tokenizer完毕')
         return tmp

     def encode(self, *args, **kwargs):
@@ -83,7 +84,7 @@ try:
     API_URL = get_conf("API_URL")
     if API_URL != "https://api.openai.com/v1/chat/completions":
         openai_endpoint = API_URL
-        print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
+        logger.warning("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
 except:
     pass
 # 新版配置
@@ -662,7 +663,7 @@ if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
     try:
         from .bridge_chatglmft import predict_no_ui_long_connection as chatglmft_noui
@@ -678,7 +679,7 @@ if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 上海AI-LAB书生大模型 -=-=-=-=-=-=-
 if "internlm" in AVAIL_LLM_MODELS:
     try:
@@ -695,7 +696,7 @@ if "internlm" in AVAIL_LLM_MODELS:
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if "chatglm_onnx" in AVAIL_LLM_MODELS:
     try:
         from .bridge_chatglmonnx import predict_no_ui_long_connection as chatglm_onnx_noui
@@ -711,7 +712,7 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS:
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 通义-本地模型 -=-=-=-=-=-=-
 if "qwen-local" in AVAIL_LLM_MODELS:
     try:
@@ -729,7 +730,7 @@ if "qwen-local" in AVAIL_LLM_MODELS:
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 通义-在线模型 -=-=-=-=-=-=-
 if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
     try:
@@ -765,7 +766,7 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 零一万物模型 -=-=-=-=-=-=-
 yi_models = ["yi-34b-chat-0205","yi-34b-chat-200k","yi-large","yi-medium","yi-spark","yi-large-turbo","yi-large-preview"]
 if any(item in yi_models for item in AVAIL_LLM_MODELS):
@@ -845,7 +846,7 @@ if any(item in yi_models for item in AVAIL_LLM_MODELS):
             },
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 讯飞星火认知大模型 -=-=-=-=-=-=-
 if "spark" in AVAIL_LLM_MODELS:
     try:
@@ -863,7 +864,7 @@ if "spark" in AVAIL_LLM_MODELS:
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
     try:
         from .bridge_spark import predict_no_ui_long_connection as spark_noui
@@ -880,7 +881,7 @@ if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if any(x in AVAIL_LLM_MODELS for x in ("sparkv3", "sparkv3.5", "sparkv4")): # 讯飞星火认知大模型
     try:
         from .bridge_spark import predict_no_ui_long_connection as spark_noui
@@ -915,7 +916,7 @@ if any(x in AVAIL_LLM_MODELS for x in ("sparkv3", "sparkv3.5", "sparkv4")): #
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if "llama2" in AVAIL_LLM_MODELS: # llama2
     try:
         from .bridge_llama2 import predict_no_ui_long_connection as llama2_noui
@@ -931,7 +932,7 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 智谱 -=-=-=-=-=-=-
 if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容配置
     try:
@@ -946,7 +947,7 @@ if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容
             },
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 幻方-深度求索大模型 -=-=-=-=-=-=-
 if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
     try:
@@ -963,7 +964,7 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 幻方-深度求索大模型在线API -=-=-=-=-=-=-
 if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS:
     try:
@@ -991,7 +992,7 @@ if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS:
             },
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- one-api 对齐支持 -=-=-=-=-=-=-
 for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
     # 为了更灵活地接入one-api多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"]
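
Note: the same four-line try/except tail recurs for every optional backend above. A hypothetical consolidation (not part of this commit) that would keep the behavior, assuming the existing trimmed_format_exc helper from toolbox:

from loguru import logger
from toolbox import trimmed_format_exc

def register_backend(register_fn):
    # run one optional model registration; on failure, log the trimmed traceback and continue
    try:
        register_fn()
    except Exception:
        logger.error(trimmed_format_exc())

Each `if "<model>" in AVAIL_LLM_MODELS:` block could then call register_backend with its model_info.update(...) wrapped in a lambda.
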
requirements.txt

@@ -28,6 +28,7 @@ edge-tts
 pymupdf
 openai
 rjsmin
+loguru
 arxiv
 numpy
 rich
shared_utils/colorful.py

@@ -1,5 +1,6 @@
 import platform
 from sys import stdout
+from loguru import logger

 if platform.system()=="Linux":
     pass
@@ -59,3 +60,30 @@ def sprint亮紫(*kw):
     return "\033[1;35m"+' '.join(kw)+"\033[0m"
 def sprint亮靛(*kw):
     return "\033[1;36m"+' '.join(kw)+"\033[0m"
+
+
+def log红(*kw,**kargs):
+    logger.opt(depth=1).info(sprint红(*kw))
+def log绿(*kw,**kargs):
+    logger.opt(depth=1).info(sprint绿(*kw))
+def log黄(*kw,**kargs):
+    logger.opt(depth=1).info(sprint黄(*kw))
+def log蓝(*kw,**kargs):
+    logger.opt(depth=1).info(sprint蓝(*kw))
+def log紫(*kw,**kargs):
+    logger.opt(depth=1).info(sprint紫(*kw))
+def log靛(*kw,**kargs):
+    logger.opt(depth=1).info(sprint靛(*kw))
+
+def log亮红(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮红(*kw))
+def log亮绿(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮绿(*kw))
+def log亮黄(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮黄(*kw))
+def log亮蓝(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮蓝(*kw))
+def log亮紫(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮紫(*kw))
+def log亮靛(*kw,**kargs):
+    logger.opt(depth=1).info(sprint亮靛(*kw))
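
Note on logger.opt(depth=1): without it, every record emitted through these wrappers would be attributed to the wrapper itself; depth=1 tells loguru to walk one frame up the stack, so the {function} and {line} fields name the caller of log红 and friends. A minimal sketch of the effect, with illustrative function names:

from loguru import logger

def log_wrapped(msg):
    # depth=1: attribute the record to the caller, not to this wrapper
    logger.opt(depth=1).info(msg)

def do_work():
    log_wrapped("working")  # the record reports function=do_work, not log_wrapped
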
shared_utils/config_loader.py

@@ -2,7 +2,7 @@ import importlib
 import time
 import os
 from functools import lru_cache
-from shared_utils.colorful import print亮红, print亮绿, print亮蓝
+from shared_utils.colorful import log亮红, log亮绿, log亮蓝

 pj = os.path.join
 default_user_name = 'default_user'
@@ -51,13 +51,13 @@ def read_env_variable(arg, default_value):
             assert arg == "proxies"
             r = eval(env_arg)
         else:
-            print亮红(f"[ENV_VAR] 环境变量{arg}不支持通过环境变量设置! ")
+            log亮红(f"[ENV_VAR] 环境变量{arg}不支持通过环境变量设置! ")
             raise KeyError
     except:
-        print亮红(f"[ENV_VAR] 环境变量{arg}加载失败! ")
+        log亮红(f"[ENV_VAR] 环境变量{arg}加载失败! ")
         raise KeyError(f"[ENV_VAR] 环境变量{arg}加载失败! ")

-    print亮绿(f"[ENV_VAR] 成功读取环境变量{arg}")
+    log亮绿(f"[ENV_VAR] 成功读取环境变量{arg}")
     return r
@@ -80,21 +80,21 @@ def read_single_conf_with_lru_cache(arg):
     if arg == 'API_URL_REDIRECT':
         oai_rd = r.get("https://api.openai.com/v1/chat/completions", None) # API_URL_REDIRECT填写格式是错误的,请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明`
         if oai_rd and not oai_rd.endswith('/completions'):
-            print亮红("\n\n[API_URL_REDIRECT] API_URL_REDIRECT填错了。请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明`。如果您确信自己没填错,无视此消息即可。")
+            log亮红("\n\n[API_URL_REDIRECT] API_URL_REDIRECT填错了。请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明`。如果您确信自己没填错,无视此消息即可。")
             time.sleep(5)
     if arg == 'API_KEY':
-        print亮蓝(f"[API_KEY] 本项目现已支持OpenAI和Azure的api-key。也支持同时填写多个api-key,如API_KEY=\"openai-key1,openai-key2,azure-key3\"")
-        print亮蓝(f"[API_KEY] 您既可以在config.py中修改api-key(s),也可以在问题输入区输入临时的api-key(s),然后回车键提交后即可生效。")
+        log亮蓝(f"[API_KEY] 本项目现已支持OpenAI和Azure的api-key。也支持同时填写多个api-key,如API_KEY=\"openai-key1,openai-key2,azure-key3\"")
+        log亮蓝(f"[API_KEY] 您既可以在config.py中修改api-key(s),也可以在问题输入区输入临时的api-key(s),然后回车键提交后即可生效。")
         if is_any_api_key(r):
-            print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
+            log亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
         else:
-            print亮红(f"[API_KEY] 您的 API_KEY({r[:15]}***)不满足任何一种已知的密钥格式,请在config文件中修改API密钥之后再运行(详见`https://github.com/binary-husky/gpt_academic/wiki/api_key`)。")
+            log亮红(f"[API_KEY] 您的 API_KEY({r[:15]}***)不满足任何一种已知的密钥格式,请在config文件中修改API密钥之后再运行(详见`https://github.com/binary-husky/gpt_academic/wiki/api_key`)。")
     if arg == 'proxies':
         if not read_single_conf_with_lru_cache('USE_PROXY'): r = None # 检查USE_PROXY,防止proxies单独起作用
         if r is None:
-            print亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问OpenAI家族的模型。建议:检查USE_PROXY选项是否修改。')
+            log亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问OpenAI家族的模型。建议:检查USE_PROXY选项是否修改。')
         else:
-            print亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
+            log亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
         assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。'
     return r
shared_utils/logging.py  (new file, 72 lines)
@@ -0,0 +1,72 @@
+from loguru import logger
+import logging
+import sys
+import os
+
+
+def chat_log_filter(record):
+    return "chat_msg" in record["extra"]
+
+
+def not_chat_log_filter(record):
+    return "chat_msg" not in record["extra"]
+
+
+def formatter_with_clip(record):
+    # Note this function returns the string to be formatted, not the actual message to be logged
+    record["extra"]["serialized"] = "555555"
+    max_len = 12
+    record['function_x'] = record['function'].center(max_len)
+    if len(record['function_x']) > max_len:
+        record['function_x'] = ".." + record['function_x'][-(max_len-2):]
+    record['line_x'] = str(record['line']).ljust(3)
+    return '<green>{time:HH:mm}</green> | <cyan>{function_x}</cyan>:<cyan>{line_x}</cyan> | <level>{message}</level>\n'
+
+
+def setup_logging(PATH_LOGGING):
+
+    admin_log_path = os.path.join(PATH_LOGGING, "admin")
+    os.makedirs(admin_log_path, exist_ok=True)
+    sensitive_log_path = os.path.join(admin_log_path, "chat_secrets.log")
+    regular_log_path = os.path.join(admin_log_path, "console_log.log")
+    logger.remove()
+    logger.configure(
+        levels=[dict(name="WARNING", color="<g>")],
+    )
+
+    logger.add(
+        sys.stderr,
+        format=formatter_with_clip,
+        # format='<green>{time:HH:mm}</green> | <cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>',
+        filter=(lambda record: not chat_log_filter(record)),
+        colorize=True,
+        enqueue=True
+    )
+
+    logger.add(
+        sensitive_log_path,
+        format='<green>{time:MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>',
+        rotation="10 MB",
+        filter=chat_log_filter,
+        enqueue=True,
+    )
+
+    logger.add(
+        regular_log_path,
+        format='<green>{time:MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>',
+        rotation="10 MB",
+        filter=not_chat_log_filter,
+        enqueue=True,
+    )
+
+    logging.getLogger("httpx").setLevel(logging.WARNING)
+
+    logger.warning(f"所有对话记录将自动保存在本地目录{sensitive_log_path}, 请注意自我隐私保护哦!")
+
+    logger.bind(chat_msg=True).info("This message is logged to the file!")
+
+
+    # logger.debug(f"debug message")
+    # logger.info(f"info message")
+    # logger.success(f"success message")
+    # logger.error(f"error message")
+    # logger.add("special.log", filter=lambda record: "special" in record["extra"])
+    # logger.debug("This message is not logged to the file")
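
Note on the routing: logger.bind(chat_msg=True) returns a logger whose records carry extra={"chat_msg": True}; chat_log_filter accepts exactly those records (sending chat transcripts to chat_secrets.log), while not_chat_log_filter and the stderr sink's lambda accept everything else, and enqueue=True pushes writes through a queue so sinks stay safe under concurrent use. A usage sketch against this module; "gpt_log" is an illustrative PATH_LOGGING value:

from loguru import logger
from shared_utils.logging import setup_logging

setup_logging("gpt_log")
logger.info("ordinary event")                 # -> stderr and console_log.log
logger.bind(chat_msg=True).info("user: hi")   # -> chat_secrets.log only
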