diff --git a/config.py b/config.py
index 709d4455..e668ac90 100644
--- a/config.py
+++ b/config.py
@@ -43,7 +43,8 @@ AVAIL_LLM_MODELS = ["qwen-max", "o1-mini", "o1-mini-2024-09-12", "o1", "o1-2024-
                     "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
                     "gemini-1.5-pro", "chatglm3", "chatglm4",
-                    "deepseek-chat", "deepseek-coder", "deepseek-reasoner"
+                    "deepseek-chat", "deepseek-coder", "deepseek-reasoner",
+                    "volcengine-deepseek-r1-250120", "volcengine-deepseek-v3-241226",
                     ]

 EMBEDDING_MODEL = "text-embedding-3-small"
@@ -267,6 +268,10 @@ MOONSHOT_API_KEY = ""
 YIMODEL_API_KEY = ""


+# Volcengine online LLMs; get an api-key at https://console.volcengine.com/ark/region:ark+cn-beijing/endpoint
+ARK_API_KEY = "00000000-0000-0000-0000-000000000000"  # Volcengine API KEY
+
+
 # Zidong Taichu LLM https://ai-maas.wair.ac.cn
 TAICHU_API_KEY = ""

diff --git a/main.py b/main.py
index 02f10b13..73a3e2b8 100644
--- a/main.py
+++ b/main.py
@@ -34,7 +34,7 @@ def encode_plugin_info(k, plugin)->str:

 def main():
     import gradio as gr
-    if gr.__version__ not in ['3.32.12']:
+    if gr.__version__ not in ['3.32.12', '3.32.13']:
         raise ModuleNotFoundError("Use the project's bundled Gradio for the best experience! Run `pip install -r requirements.txt` to install the bundled Gradio and the other dependencies; see requirements.txt for details.")
     # Some basic tools

diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 100f285e..e911a765 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -80,6 +80,7 @@ ollama_endpoint = "http://localhost:11434/api/chat"
 yimodel_endpoint = "https://api.lingyiwanwu.com/v1/chat/completions"
 deepseekapi_endpoint = "https://api.deepseek.com/v1/chat/completions"
 grok_model_endpoint = "https://api.x.ai/v1/chat/completions"
+volcengine_endpoint = "https://ark.cn-beijing.volces.com/api/v3/chat/completions"

 if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
 azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
@@ -102,6 +103,7 @@ if ollama_endpoint in API_URL_REDIRECT: ollama_endpoint = API_URL_REDIRECT[ollama_endpoint]
 if yimodel_endpoint in API_URL_REDIRECT: yimodel_endpoint = API_URL_REDIRECT[yimodel_endpoint]
 if deepseekapi_endpoint in API_URL_REDIRECT: deepseekapi_endpoint = API_URL_REDIRECT[deepseekapi_endpoint]
 if grok_model_endpoint in API_URL_REDIRECT: grok_model_endpoint = API_URL_REDIRECT[grok_model_endpoint]
+if volcengine_endpoint in API_URL_REDIRECT: volcengine_endpoint = API_URL_REDIRECT[volcengine_endpoint]

 # Get tokenizer
 tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
@@ -954,7 +956,7 @@ if any(item in grok_models for item in AVAIL_LLM_MODELS):
     try:
         grok_beta_128k_noui, grok_beta_128k_ui = get_predict_function(
             api_key_conf_name="GROK_API_KEY", max_output_token=8192, disable_proxy=False
-            )
+        )

         model_info.update({
             "grok-beta": {
@@ -1089,8 +1091,10 @@ if "deepseekcoder" in AVAIL_LLM_MODELS:   # deepseekcoder
         })
     except:
         logger.error(trimmed_format_exc())
+
 # -=-=-=-=-=-=- DeepSeek (High-Flyer) online LLM API -=-=-=-=-=-=-
-if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS or "deepseek-reasoner" in AVAIL_LLM_MODELS:
+deepseek_models = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"]
+if any(item in deepseek_models for item in AVAIL_LLM_MODELS):
     try:
         deepseekapi_noui, deepseekapi_ui = get_predict_function(
             api_key_conf_name="DEEPSEEK_API_KEY", max_output_token=4096, disable_proxy=False
@@ -1127,6 +1131,60 @@ if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS or "deepseek-reasoner" in AVAIL_LLM_MODELS:
         })
     except:
         logger.error(trimmed_format_exc())
+
+# -=-=-=-=-=-=- Volcengine alignment support -=-=-=-=-=-=-
+for model in [m for m in AVAIL_LLM_MODELS if m.startswith("volcengine-")]:
+    # This interface allows flexible access to the Volcengine multi-model console,
+    # e.g. AVAIL_LLM_MODELS = ["volcengine-deepseek-r1-250120(max_token=6666)"], where
+    #   "volcengine-"          is the prefix (required)
+    #   "deepseek-r1-250120"   is the model name (required)
+    #   "(max_token=6666)"     is the configuration (optional)
+    model_info_extend = model_info.copy()  # copy, so the lookup table below does not leak into the global model_info
+    model_info_extend.update({
+        "deepseek-r1-250120": {
+            "max_token": 16384,
+            "enable_reasoning": True,
+            "can_multi_thread": True,
+            "endpoint": volcengine_endpoint,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+        "deepseek-v3-241226": {
+            "max_token": 16384,
+            "enable_reasoning": False,
+            "can_multi_thread": True,
+            "endpoint": volcengine_endpoint,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+    })
+    try:
+        origin_model_name, max_token_tmp = read_one_api_model_name(model)
+        # If this is a known model, try to fetch its info
+        original_model_info = model_info_extend.get(origin_model_name.replace("volcengine-", "", 1), None)
+    except:
+        logger.error(f"The max_token configuration of volcengine model {model} is not an integer; please check the config file.")
+        continue
+
+    volcengine_noui, volcengine_ui = get_predict_function(
+        api_key_conf_name="ARK_API_KEY", max_output_token=8192, disable_proxy=True, model_remove_prefix=["volcengine-"]
+    )
+
+    this_model_info = {
+        "fn_with_ui": volcengine_ui,
+        "fn_without_ui": volcengine_noui,
+        "endpoint": volcengine_endpoint,
+        "can_multi_thread": True,
+        "max_token": 64000,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    }
+
+    # Sync remaining attributes from the known-model table
+    for attribute in ("has_multimodal_capacity", "enable_reasoning"):
+        if original_model_info is not None and original_model_info.get(attribute, None) is not None:
+            this_model_info.update({attribute: original_model_info.get(attribute, None)})
+    model_info.update({model: this_model_info})
+
 # -=-=-=-=-=-=- one-api alignment support -=-=-=-=-=-=-
 for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
     # This interface allows flexible access to the one-api multi-model console,
     # e.g. AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"]
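Note: the naming convention consumed by the volcengine loop above can be illustrated with a standalone sketch. parse_model_entry below is a hypothetical stand-in for the project's read_one_api_model_name, assuming it splits off an optional "(max_token=NNNN)" suffix the same way the one-api loop does:

    import re

    # Hypothetical re-statement of the parsing convention, for illustration only.
    def parse_model_entry(entry):
        match = re.match(r"^(.*?)\(max_token=(\d+)\)$", entry)
        if match:
            return match.group(1), int(match.group(2))
        return entry, None  # no "(max_token=...)" configuration given

    origin_model_name, max_token = parse_model_entry("volcengine-deepseek-r1-250120(max_token=6666)")
    # origin_model_name == "volcengine-deepseek-r1-250120", max_token == 6666
    endpoint_model = origin_model_name.replace("volcengine-", "", 1)
    # endpoint_model == "deepseek-r1-250120" is the name the Volcengine endpoint actually receives
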
diff --git a/request_llms/oai_std_model_template.py b/request_llms/oai_std_model_template.py
index d50e0d27..66f3a0d7 100644
--- a/request_llms/oai_std_model_template.py
+++ b/request_llms/oai_std_model_template.py
@@ -57,7 +57,7 @@ def decode_chunk(chunk):
             finish_reason = chunk["error"]["code"]
         except:
             finish_reason = "API_ERROR"
-        return response, reasoning_content, finish_reason
+        return response, reasoning_content, finish_reason, str(chunk)

     try:
         if chunk["choices"][0]["delta"]["content"] is not None:
@@ -122,7 +122,8 @@ def generate_message(input, model, key, history, max_output_token, system_prompt
 def get_predict_function(
     api_key_conf_name,
     max_output_token,
-    disable_proxy = False
+    disable_proxy = False,
+    model_remove_prefix = [],
 ):
     """
     Generate response functions for an OpenAI-style API. Parameters:
@@ -137,6 +138,16 @@ def get_predict_function(

     APIKEY = get_conf(api_key_conf_name)

+    def remove_prefix(model_name):
+        # Strip the configured model-name prefix: "volcengine-deepseek-r1-250120" becomes "deepseek-r1-250120"
+        if not model_remove_prefix:
+            return model_name
+        model_without_prefix = model_name
+        for prefix in model_remove_prefix:
+            if model_without_prefix.startswith(prefix):
+                model_without_prefix = model_without_prefix[len(prefix):]
+        return model_without_prefix
+
     def predict_no_ui_long_connection(
         inputs,
         llm_kwargs,
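Note: the behavior of the remove_prefix closure added above can be checked with a self-contained sketch, assuming it is configured with model_remove_prefix=["volcengine-"] as bridge_all.py does:

    # Standalone restatement of the closure, for illustration only.
    def remove_prefix(model_name, model_remove_prefix=("volcengine-",)):
        for prefix in model_remove_prefix:
            if model_name.startswith(prefix):
                model_name = model_name[len(prefix):]
        return model_name

    assert remove_prefix("volcengine-deepseek-r1-250120") == "deepseek-r1-250120"
    assert remove_prefix("deepseek-chat") == "deepseek-chat"  # no matching prefix: unchanged
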
@@ -164,9 +175,9 @@ def get_predict_function(
             raise RuntimeError(f"APIKEY is empty; please check {api_key_conf_name} in the config file")
         if inputs == "":
             inputs = "你好👋"
         headers, payload = generate_message(
             input=inputs,
-            model=llm_kwargs["llm_model"],
+            model=remove_prefix(llm_kwargs["llm_model"]),
             key=APIKEY,
             history=history,
             max_output_token=max_output_token,
@@ -302,7 +313,7 @@ def get_predict_function(

         headers, payload = generate_message(
             input=inputs,
-            model=llm_kwargs["llm_model"],
+            model=remove_prefix(llm_kwargs["llm_model"]),
             key=APIKEY,
             history=history,
             max_output_token=max_output_token,
diff --git a/requirements.txt b/requirements.txt
index 7708fef0..92c86c01 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-https://public.agent-matrix.com/publish/gradio-3.32.12-py3-none-any.whl
+https://public.agent-matrix.com/publish/gradio-3.32.13-py3-none-any.whl
 fastapi==0.110
 gradio-client==0.8
 pypdf2==2.12.1
diff --git a/shared_utils/advanced_markdown_format.py b/shared_utils/advanced_markdown_format.py
index 383eed98..4771a4f1 100644
--- a/shared_utils/advanced_markdown_format.py
+++ b/shared_utils/advanced_markdown_format.py
@@ -3,7 +3,8 @@ import re
 import os
 import math
 import html
-
+import base64
+import gzip
 from loguru import logger
 from textwrap import dedent
 from functools import lru_cache
@@ -325,6 +326,14 @@ def markdown_convertion_for_file(txt):
     # cat them together
     return pre + convert_stage_5 + suf

+def compress_string(s):
+    compressed = gzip.compress(s.encode('utf-8'))  # local renamed so it does not shadow the function name
+    return base64.b64encode(compressed).decode()
+
+def decompress_string(s):
+    decoded_string = base64.b64decode(s)
+    return gzip.decompress(decoded_string).decode('utf-8')
+
 @lru_cache(maxsize=128)  # use an LRU cache to speed up conversion
 def markdown_convertion(txt):
     """
@@ -336,6 +345,12 @@ def markdown_convertion(txt):
     """
         # print('Warning: the input string was already converted once; converting again may cause problems')
         return txt  # already converted, no second conversion needed

+    # Embed a base64-encoded copy of the original text so the raw text can be recovered when copying
+    raw_text_encoded = compress_string(txt)
+    raw_text_node = f'
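Note: the compress_string/decompress_string pair added to advanced_markdown_format.py is a plain gzip-plus-base64 round trip; a minimal self-contained check of that property:

    import base64
    import gzip

    def compress_string(s):
        # gzip-compress the UTF-8 bytes, then base64-encode so the result is plain ASCII text
        return base64.b64encode(gzip.compress(s.encode('utf-8'))).decode()

    def decompress_string(s):
        # invert: base64-decode back to gzip bytes, decompress, decode UTF-8
        return gzip.decompress(base64.b64decode(s)).decode('utf-8')

    text = "# Some Markdown, including non-ASCII: 原文"
    assert decompress_string(compress_string(text)) == text
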