镜像自地址
https://github.com/binary-husky/gpt_academic.git
已同步 2025-12-07 06:56:48 +00:00
api_key_manager
这个提交包含在:
@@ -29,6 +29,12 @@ proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
|
|||||||
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
|
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
|
||||||
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
|
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
|
||||||
|
|
||||||
|
def report_invalid_key(key):
    """Report an API key that the provider has rejected.

    When the experimental ``BLOCK_INVALID_APIKEY`` option is enabled in the
    configuration, the key is added to the in-memory blacklist kept by
    ``ApiKeyManager`` so it will not be selected again. Otherwise this is a
    no-op.

    Args:
        key: The API key string that triggered a deactivation/invalid error.
    """
    if get_conf("BLOCK_INVALID_APIKEY"):
        # Experimental feature: automatically detect and block invalid keys.
        # Import is kept local so the manager is only loaded when enabled.
        from request_llms.key_manager import ApiKeyManager
        # NOTE: add_key_to_blacklist returns None; the original code
        # pointlessly assigned it to a variable — dropped here.
        ApiKeyManager().add_key_to_blacklist(key)
def get_full_error(chunk, stream_response):
|
def get_full_error(chunk, stream_response):
|
||||||
"""
|
"""
|
||||||
获取完整的从Openai返回的报错
|
获取完整的从Openai返回的报错
|
||||||
@@ -83,7 +89,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
|
用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
|
||||||
"""
|
"""
|
||||||
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
|
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
|
||||||
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
|
headers, payload, api_key = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
|
||||||
retry = 0
|
retry = 0
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
@@ -113,6 +119,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
if "reduce the length" in error_msg:
|
if "reduce the length" in error_msg:
|
||||||
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
|
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
|
||||||
else:
|
else:
|
||||||
|
if "API key has been deactivated" in error_msg: report_invalid_key(api_key)
|
||||||
raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
|
raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
|
||||||
if ('data: [DONE]' in chunk): break # api2d 正常完成
|
if ('data: [DONE]' in chunk): break # api2d 正常完成
|
||||||
json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
|
json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
|
||||||
@@ -175,7 +182,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
time.sleep(2)
|
time.sleep(2)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
|
headers, payload, api_key = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
|
||||||
except RuntimeError as e:
|
except RuntimeError as e:
|
||||||
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
|
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
|
||||||
@@ -223,7 +230,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的非OpenAI官方接口,建议选择更稳定的接口。")
|
yield from update_ui(chatbot=chatbot, history=history, msg="检测到有缺陷的非OpenAI官方接口,建议选择更稳定的接口。")
|
||||||
break
|
break
|
||||||
# 其他情况,直接返回报错
|
# 其他情况,直接返回报错
|
||||||
chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
|
chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key)
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode()) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode()) # 刷新界面
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -265,12 +272,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
chunk = get_full_error(chunk, stream_response)
|
chunk = get_full_error(chunk, stream_response)
|
||||||
chunk_decoded = chunk.decode()
|
chunk_decoded = chunk.decode()
|
||||||
error_msg = chunk_decoded
|
error_msg = chunk_decoded
|
||||||
chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
|
chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key)
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
|
||||||
print(error_msg)
|
print(error_msg)
|
||||||
return
|
return
|
||||||
|
|
||||||
def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg):
|
def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key=""):
|
||||||
from .bridge_all import model_info
|
from .bridge_all import model_info
|
||||||
openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup'
|
openai_website = ' 请登录OpenAI查看详情 https://platform.openai.com/signup'
|
||||||
if "reduce the length" in error_msg:
|
if "reduce the length" in error_msg:
|
||||||
@@ -281,15 +288,15 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
|
|||||||
elif "does not exist" in error_msg:
|
elif "does not exist" in error_msg:
|
||||||
chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
|
chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
|
||||||
elif "Incorrect API key" in error_msg:
|
elif "Incorrect API key" in error_msg:
|
||||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website)
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务. " + openai_website); report_invalid_key(api_key)
|
||||||
elif "exceeded your current quota" in error_msg:
|
elif "exceeded your current quota" in error_msg:
|
||||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website)
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website); report_invalid_key(api_key)
|
||||||
elif "account is not active" in error_msg:
|
elif "account is not active" in error_msg:
|
||||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website)
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website); report_invalid_key(api_key)
|
||||||
elif "associated with a deactivated account" in error_msg:
|
elif "associated with a deactivated account" in error_msg:
|
||||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website)
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website); report_invalid_key(api_key)
|
||||||
elif "API key has been deactivated" in error_msg:
|
elif "API key has been deactivated" in error_msg:
|
||||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. OpenAI以账户失效为由, 拒绝服务." + openai_website)
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. OpenAI以账户失效为由, 拒绝服务." + openai_website); report_invalid_key(api_key)
|
||||||
elif "bad forward key" in error_msg:
|
elif "bad forward key" in error_msg:
|
||||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
|
||||||
elif "Not enough point" in error_msg:
|
elif "Not enough point" in error_msg:
|
||||||
@@ -371,6 +378,6 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
|
|||||||
print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
|
print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
|
||||||
except:
|
except:
|
||||||
print('输入中可能存在乱码。')
|
print('输入中可能存在乱码。')
|
||||||
return headers,payload
|
return headers, payload, api_key
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,6 @@
|
|||||||
import random
|
import random
|
||||||
|
import os
|
||||||
|
from toolbox import get_log_folder
|
||||||
|
|
||||||
def Singleton(cls):
|
def Singleton(cls):
|
||||||
_instance = {}
|
_instance = {}
|
||||||
@@ -12,18 +14,41 @@ def Singleton(cls):
|
|||||||
|
|
||||||
|
|
||||||
@Singleton
class ApiKeyManager():
    """Process-wide registry of failed API keys.

    Invalid (blacklisted) keys are kept in memory only; an audit trail is
    optionally appended to ``invalid_key.log`` under the admin log folder.
    The ``Singleton`` decorator ensures a single shared instance.
    """

    def __init__(self, mode='blacklist') -> None:
        # self.key_avail_list = []
        # Keys that have been reported as invalid/deactivated.
        self.key_black_list = []
        # Verbose stdout tracing (off by default).
        self.debug = False
        # Whether to append blacklist events to the log file.
        self.log = True
        # Snapshot of the keys that were still usable at the last selection.
        self.remain_keys = []

    def add_key_to_blacklist(self, key):
        """Record *key* as invalid and optionally log the event.

        Args:
            key: The API key string to blacklist.
        """
        self.key_black_list.append(key)
        if self.debug: print('black list key added', key)
        if self.log:
            with open(
                os.path.join(get_log_folder(user='admin', plugin_name='api_key_manager'), 'invalid_key.log'), 'a+', encoding='utf8') as f:
                # FIX: the original concatenated str + int
                # ('num blacklist keys' + len(...)), raising TypeError on the
                # very first logged blacklist event. Use an f-string instead.
                summary = f'num blacklist keys {len(self.key_black_list)}, num valid keys {len(self.remain_keys)}'
                f.write('\n\n' + summary + '\n')
                f.write('---- <add blacklist key> ----\n')
                f.write(key)
                f.write('\n')
                f.write('---- <all blacklist keys> ----\n')
                f.write(str(self.key_black_list))
                f.write('\n')
                f.write('---- <remain keys> ----\n')
                f.write(str(self.remain_keys))
                f.write('\n')

    def select_avail_key(self, key_list):
        """Pick a random key from *key_list*, skipping blacklisted ones.

        Args:
            key_list: Candidate API keys.

        Returns:
            A randomly chosen key that is not on the blacklist.

        Raises:
            KeyError: If every candidate key has been blacklisted.
        """
        # select key from key_list, but avoid keys also in self.key_black_list, raise error if no key can be found
        available_keys = [key for key in key_list if key not in self.key_black_list]
        if not available_keys:
            raise KeyError("所有API KEY都被OPENAI拒绝了")
        selected_key = random.choice(available_keys)
        if self.debug: print('total keys', len(key_list), 'valid keys', len(available_keys))
        if self.log: self.remain_keys = available_keys
        return selected_key
||||||
@@ -732,7 +732,11 @@ def select_api_key(keys, llm_model):
|
|||||||
raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai,azure,claude,api2d等请求源)。")
|
raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai,azure,claude,api2d等请求源)。")
|
||||||
|
|
||||||
api_key = random.choice(avail_key_list) # 随机负载均衡
|
api_key = random.choice(avail_key_list) # 随机负载均衡
|
||||||
if ENABLE
|
|
||||||
|
if get_conf("BLOCK_INVALID_APIKEY"):
|
||||||
|
# 实验性功能,自动检测并屏蔽失效的KEY,请勿使用
|
||||||
|
from request_llms.key_manager import ApiKeyManager
|
||||||
|
api_key = ApiKeyManager().select_avail_key(avail_key_list)
|
||||||
return api_key
|
return api_key
|
||||||
|
|
||||||
def read_env_variable(arg, default_value):
|
def read_env_variable(arg, default_value):
|
||||||
|
|||||||
在新工单中引用
屏蔽一个用户