From ecb473bc8b6d331f9fea4398289d5dd933f12408 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Sun, 29 Oct 2023 00:46:19 +0800
Subject: [PATCH 1/2] api_key_manager

---
 request_llms/bridge_chatgpt.py | 29 ++++++++++++++++++-----------
 request_llms/key_manager.py    | 29 +++++++++++++++++++++++++++--
 toolbox.py                     |  6 +++++-
 3 files changed, 50 insertions(+), 14 deletions(-)

diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py
index 9903da9d..38933c60 100644
--- a/request_llms/bridge_chatgpt.py
+++ b/request_llms/bridge_chatgpt.py
@@ -29,6 +29,12 @@ proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
 timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
                   '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
 
+def report_invalid_key(key):
+    if get_conf("BLOCK_INVALID_APIKEY"):
+        # 实验性功能,自动检测并屏蔽失效的KEY,请勿使用
+        from request_llms.key_manager import ApiKeyManager
+        api_key = ApiKeyManager().add_key_to_blacklist(key)
+
 def get_full_error(chunk, stream_response):
     """
         获取完整的从Openai返回的报错
@@ -83,7 +89,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
         用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
     """
     watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
-    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
+    headers, payload, api_key = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
     retry = 0
     while True:
         try:
@@ -113,6 +119,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
             if "reduce the length" in error_msg:
                 raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
             else:
+                if "API key has been deactivated" in error_msg: report_invalid_key(api_key)
                 raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
         if ('data: [DONE]' in chunk): break # api2d 正常完成
         json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
@@ -175,7 +182,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             time.sleep(2)
 
     try:
-        headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
+        headers, payload, api_key = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
     except RuntimeError as e:
         chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
         yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
@@ -223,7 +230,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                     yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的非OpenAI官方接口,建议选择更稳定的接口。")
                     break
                 # 其他情况,直接返回报错
-                chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
+                chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key)
                 yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode()) # 刷新界面
                 return
 
@@ -265,12 +272,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                 chunk = get_full_error(chunk, stream_response)
                 chunk_decoded = chunk.decode()
                 error_msg = chunk_decoded
-                chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
+                chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key)
                 yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
                 print(error_msg)
                 return
 
-def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg):
+def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key=""):
     from .bridge_all import model_info
     openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup'
     if "reduce the length" in error_msg:
@@ -281,15 +288,15 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
     elif "does not exist" in error_msg:
         chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
     elif "Incorrect API key" in error_msg:
-        chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website)
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website); report_invalid_key(api_key)
     elif "exceeded your current quota" in error_msg:
-        chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website)
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website); report_invalid_key(api_key)
     elif "account is not active" in error_msg:
-        chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website)
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website); report_invalid_key(api_key)
     elif "associated with a deactivated account" in error_msg:
-        chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website)
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website); report_invalid_key(api_key)
     elif "API key has been deactivated" in error_msg:
-        chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. OpenAI以账户失效为由, 拒绝服务." + openai_website)
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. OpenAI以账户失效为由, 拒绝服务." + openai_website); report_invalid_key(api_key)
     elif "bad forward key" in error_msg:
         chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
     elif "Not enough point" in error_msg:
@@ -371,6 +378,6 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
             print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
     except:
         print('输入中可能存在乱码。')
-    return headers,payload
+    return headers, payload, api_key
 
 
diff --git a/request_llms/key_manager.py b/request_llms/key_manager.py
index 8563d2ef..eed0ed90 100644
--- a/request_llms/key_manager.py
+++ b/request_llms/key_manager.py
@@ -1,4 +1,6 @@
 import random
+import os
+from toolbox import get_log_folder
 
 def Singleton(cls):
     _instance = {}
@@ -12,18 +14,41 @@ def Singleton(cls):
 
 
 @Singleton
-class OpenAI_ApiKeyManager():
+class ApiKeyManager():
+    """
+    只把失效的key保存在内存中
+    """
     def __init__(self, mode='blacklist') -> None:
         # self.key_avail_list = []
         self.key_black_list = []
+        self.debug = False
+        self.log = True
+        self.remain_keys = []
 
     def add_key_to_blacklist(self, key):
         self.key_black_list.append(key)
+        if self.debug: print('black list key added', key)
+        if self.log:
+            with open(
+                os.path.join(get_log_folder(user='admin', plugin_name='api_key_manager'), 'invalid_key.log'), 'a+', encoding='utf8') as f:
+                summary = f'blacklisted keys: {len(self.key_black_list)}, remaining keys: {len(self.remain_keys)}'
+                f.write('\n\n' + summary + '\n')
+                f.write('---- ----\n')
+                f.write(key)
+                f.write('\n')
+                f.write('---- ----\n')
+                f.write(str(self.key_black_list))
+                f.write('\n')
+                f.write('---- ----\n')
+                f.write(str(self.remain_keys))
+                f.write('\n')
 
     def select_avail_key(self, key_list):
         # select key from key_list, but avoid keys also in self.key_black_list, raise error if no key can be found
         available_keys = [key for key in key_list if key not in self.key_black_list]
         if not available_keys:
-            raise KeyError("No available key found.")
+            raise KeyError("所有API KEY都被OPENAI拒绝了")
         selected_key = random.choice(available_keys)
+        if self.debug: print('total keys', len(key_list), 'valid keys', len(available_keys))
+        if self.log: self.remain_keys = available_keys
         return selected_key
\ No newline at end of file
diff --git a/toolbox.py b/toolbox.py
index 5b7a7519..7d1d9531 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -732,7 +732,11 @@ def select_api_key(keys, llm_model):
         raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai,azure,claude,api2d等请求源)。")
 
     api_key = random.choice(avail_key_list) # 随机负载均衡
-    if ENABLE
+
+    if get_conf("BLOCK_INVALID_APIKEY"):
+        # 实验性功能,自动检测并屏蔽失效的KEY,请勿使用
+        from request_llms.key_manager import ApiKeyManager
+        api_key = ApiKeyManager().select_avail_key(avail_key_list)
     return api_key
 
 def read_env_variable(arg, default_value):

From 25cf86dae6db43a76b42f34a82a7c4ab6ff0b14e Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Mon, 30 Oct 2023 10:59:08 +0800
Subject: [PATCH 2/2] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dget=5Fconf=E6=8E=A5?=
 =?UTF-8?q?=E5=8F=A3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 main.py                            | 2 +-
 request_llms/bridge_chatgpt.py     | 1 +
 request_llms/bridge_qianfan.py     | 2 +-
 request_llms/bridge_spark.py       | 2 +-
 request_llms/bridge_stackclaude.py | 4 ++--
 5 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/main.py b/main.py
index ee8f5cf7..bf843825 100644
--- a/main.py
+++ b/main.py
@@ -433,7 +433,7 @@ def main():
                     server_port=PORT,
                     favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"),
                     auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
-                    blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
+                    blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile","gpt_log/admin"])
 
     # 如果需要在二级路径下运行
     # CUSTOM_PATH = get_conf('CUSTOM_PATH')
diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py
index 38933c60..c9572e19 100644
--- a/request_llms/bridge_chatgpt.py
+++ b/request_llms/bridge_chatgpt.py
@@ -120,6 +120,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
                 raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
             else:
                 if "API key has been deactivated" in error_msg: report_invalid_key(api_key)
+                elif "exceeded your current quota" in error_msg: report_invalid_key(api_key)
                 raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
         if ('data: [DONE]' in chunk): break # api2d 正常完成
         json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
diff --git a/request_llms/bridge_qianfan.py b/request_llms/bridge_qianfan.py
index 99f0623f..81e7a9c1 100644
--- a/request_llms/bridge_qianfan.py
+++ b/request_llms/bridge_qianfan.py
@@ -75,7 +75,7 @@ def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
 
 
 def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
-    BAIDU_CLOUD_QIANFAN_MODEL, = get_conf('BAIDU_CLOUD_QIANFAN_MODEL')
+    BAIDU_CLOUD_QIANFAN_MODEL = get_conf('BAIDU_CLOUD_QIANFAN_MODEL')
 
     url_lib = {
         "ERNIE-Bot": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions" ,
diff --git a/request_llms/bridge_spark.py b/request_llms/bridge_spark.py
index d6ff42fa..a3a3e091 100644
--- a/request_llms/bridge_spark.py
+++ b/request_llms/bridge_spark.py
@@ -8,7 +8,7 @@ from multiprocessing import Process, Pipe
 model_name = '星火认知大模型'
 
 def validate_key():
-    XFYUN_APPID, = get_conf('XFYUN_APPID', )
+    XFYUN_APPID = get_conf('XFYUN_APPID', )
     if XFYUN_APPID == '00000000' or XFYUN_APPID == '':
         return False
     return True
diff --git a/request_llms/bridge_stackclaude.py b/request_llms/bridge_stackclaude.py
index a3939844..0b42a17c 100644
--- a/request_llms/bridge_stackclaude.py
+++ b/request_llms/bridge_stackclaude.py
@@ -36,7 +36,7 @@ try:
         CHANNEL_ID = None
 
         async def open_channel(self):
-            response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0])
+            response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID'))
             self.CHANNEL_ID = response["channel"]["id"]
 
         async def chat(self, text):
@@ -51,7 +51,7 @@ try:
             # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题
             resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
             msg = [msg for msg in resp["messages"]
-                   if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]]
+                   if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')]
             return msg
         except (SlackApiError, KeyError) as e:
             raise RuntimeError(f"获取Slack消息失败。")
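
Notes on the series: patch 1 threads the selected api_key from generate_payload() through predict() and predict_no_ui_long_connection() into handle_error(), so that keys OpenAI rejects ("Incorrect API key", "exceeded your current quota", and the deactivated-account variants) are reported to a process-wide blacklist, which select_api_key() in toolbox.py consults when BLOCK_INVALID_APIKEY is enabled. Below is a minimal, self-contained sketch of that round trip; it assumes the standard memoizing body for the Singleton decorator (the key_manager.py hunk shows only its first lines) and omits the get_conf gating and the invalid_key.log writing:

    import random

    def Singleton(cls):
        # Memoize one instance per class, mirroring request_llms/key_manager.py
        _instance = {}
        def _singleton(*args, **kwargs):
            if cls not in _instance:
                _instance[cls] = cls(*args, **kwargs)
            return _instance[cls]
        return _singleton

    @Singleton
    class ApiKeyManager():
        def __init__(self, mode='blacklist') -> None:
            self.key_black_list = []  # invalid keys are kept in memory only

        def add_key_to_blacklist(self, key):
            self.key_black_list.append(key)

        def select_avail_key(self, key_list):
            # Skip keys that previously triggered a deactivated/quota error
            available_keys = [k for k in key_list if k not in self.key_black_list]
            if not available_keys:
                raise KeyError("All API keys were rejected by OpenAI.")
            return random.choice(available_keys)

    keys = ['sk-aaa', 'sk-bbb']                      # stand-in keys for illustration
    ApiKeyManager().add_key_to_blacklist('sk-aaa')   # what report_invalid_key() does once enabled
    # @Singleton hands every caller the same instance, so this selection sees
    # the entry blacklisted above and can only return 'sk-bbb'.
    assert ApiKeyManager().select_avail_key(keys) == 'sk-bbb'

Patch 2 then adapts callers to the reworked get_conf() interface named in its subject: a single-key lookup now returns the bare value rather than a 1-tuple, so the XFYUN_APPID and BAIDU_CLOUD_QIANFAN_MODEL tuple-unpacking and the get_conf('SLACK_CLAUDE_BOT_ID')[0] indexing are dropped.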