Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 06:26:47 +00:00
Commit: 紫东太初大模型 (Zidong Taichu large model)
@@ -229,9 +229,15 @@ MOONSHOT_API_KEY = ""
 # 零一万物 (Yi Model) API KEY
 YIMODEL_API_KEY = ""


 # DeepSeek (深度求索) API KEY; default request endpoint: "https://api.deepseek.com/v1/chat/completions"
 DEEPSEEK_API_KEY = ""
+
+
+# Zidong Taichu (紫东太初)
+TAICHU_API_KEY = ""
+
+
 # Mathpix provides OCR for PDF files, but requires account registration
 MATHPIX_APPID = ""
 MATHPIX_APPKEY = ""
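For context, the new key is consumed through the project's get_conf helper; a minimal sketch of that lookup, mirroring validate_key() in request_llms/bridge_taichu.py further down this diff (the error string is the one that bridge raises):

    from toolbox import get_conf  # gpt_academic's own config accessor

    # Read the key added above; an empty string means it was never configured.
    TAICHU_API_KEY = get_conf("TAICHU_API_KEY")
    if TAICHU_API_KEY == '':
        raise RuntimeError('请配置 TAICHU_API_KEY')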
@@ -233,7 +233,7 @@ def pdf2tex_project(pdf_file_path, plugin_kwargs):
 def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # <-------------- information about this plugin ------------->
     chatbot.append(["函数插件功能?",
-        "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
+        "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

     # <-------------- more requirements ------------->
@@ -310,7 +310,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
     # <-------------- information about this plugin ------------->
     chatbot.append([
         "函数插件功能?",
-        "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
+        "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

     # <-------------- more requirements ------------->
@@ -404,7 +404,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
     # <-------------- information about this plugin ------------->
     chatbot.append([
         "函数插件功能?",
-        "将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
+        "将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

     # <-------------- more requirements ------------->
@@ -34,6 +34,9 @@ from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
 from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
 from .bridge_zhipu import predict as zhipu_ui

+from .bridge_taichu import predict_no_ui_long_connection as taichu_noui
+from .bridge_taichu import predict as taichu_ui
+
 from .bridge_cohere import predict as cohere_ui
 from .bridge_cohere import predict_no_ui_long_connection as cohere_noui
@@ -116,6 +119,15 @@ model_info = {
         "token_cnt": get_token_num_gpt35,
     },

+    "taichu": {
+        "fn_with_ui": taichu_ui,
+        "fn_without_ui": taichu_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 4096,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
+
     "gpt-3.5-turbo-16k": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
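Registering the model in model_info is what makes it reachable; a minimal sketch of how a lookup through this table dispatches to the Taichu bridge (illustrative only, using just the fields shown in the entry above — the real call sites live elsewhere in bridge_all.py):

    # Hypothetical dispatch through the registry entry added above.
    entry = model_info["taichu"]
    reply = entry["fn_without_ui"](          # resolves to taichu_noui
        inputs="你好", llm_kwargs={"llm_model": "taichu"},
        history=[], sys_prompt="")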
request_llms/bridge_taichu.py (new file, 72 lines)
@@ -0,0 +1,72 @@
+import time
+import os
+from toolbox import update_ui, get_conf, update_ui_lastest_msg, log_chat
+from toolbox import check_packages, report_exception, have_any_recent_upload_image_files
+from toolbox import ChatBotWithCookies
+
+model_name = 'Taichu-2.0'
+taichu_default_model = 'taichu_llm'
+
+
+def validate_key():
+    TAICHU_API_KEY = get_conf("TAICHU_API_KEY")
+    if TAICHU_API_KEY == '': return False
+    return True
+
+
+def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
+                                  observe_window:list=[], console_slience:bool=False):
+    """
+    ⭐ Multi-threaded method
+    See request_llms/bridge_all.py for the interface documentation
+    """
+    watch_dog_patience = 5  # seconds of silence the watchdog tolerates
+    response = ""
+
+    if llm_kwargs["llm_model"] == "taichu":
+        llm_kwargs["llm_model"] = taichu_default_model
+
+    if validate_key() is False:
+        raise RuntimeError('请配置 TAICHU_API_KEY')
+
+    # Start receiving the streamed reply
+    from .com_taichu import TaichuChatInit
+    taichu_chat = TaichuChatInit()
+    for chunk, response in taichu_chat.generate_chat(inputs, llm_kwargs, history, sys_prompt):
+        if len(observe_window) >= 1:
+            observe_window[0] = response
+        if len(observe_window) >= 2:
+            if (time.time() - observe_window[1]) > watch_dog_patience:
+                raise RuntimeError("程序终止。")
+    return response
+
+
+def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
+            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
+    """
+    ⭐ Single-threaded method
+    See request_llms/bridge_all.py for the interface documentation
+    """
+    chatbot.append([inputs, ""])
+    yield from update_ui(chatbot=chatbot, history=history)
+
+    if validate_key() is False:
+        yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置TAICHU_API_KEY", chatbot=chatbot, history=history, delay=0)
+        return
+
+    if additional_fn is not None:
+        from core_functional import handle_core_functionality
+        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
+        chatbot[-1] = [inputs, ""]
+        yield from update_ui(chatbot=chatbot, history=history)
+
+    if llm_kwargs["llm_model"] == "taichu":
+        llm_kwargs["llm_model"] = taichu_default_model
+
+    # Start receiving the streamed reply
+    from .com_taichu import TaichuChatInit
+    taichu_chat = TaichuChatInit()
+    for chunk, response in taichu_chat.generate_chat(inputs, llm_kwargs, history, system_prompt):
+        chatbot[-1] = [inputs, response]
+        yield from update_ui(chatbot=chatbot, history=history)
+    history.extend([inputs, response])
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    yield from update_ui(chatbot=chatbot, history=history)
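A usage sketch for the new bridge, modeled on the test change at the bottom of this diff (the llm_kwargs fields are the ones that test passes):

    from request_llms.bridge_taichu import predict_no_ui_long_connection

    llm_kwargs = {"llm_model": "taichu", "max_length": 4096, "top_p": 1, "temperature": 1}
    result = predict_no_ui_long_connection(inputs="你好", llm_kwargs=llm_kwargs,
                                           history=[], sys_prompt="")
    print(result)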
request_llms/com_taichu.py (new file, 55 lines)
@@ -0,0 +1,55 @@
+# encoding: utf-8
+# @Time   : 2024/1/22
+# @Author : Kilig947 & binary husky
+# @Descr  : Zidong Taichu model bridge (adapted from the Zhipu AI bridge)
+from toolbox import get_conf, encode_image, get_pictures_list
+import logging, os, requests
+import json
+
+
+class TaichuChatInit:
+    def __init__(self): ...
+
+    def __conversation_user(self, user_input: str, llm_kwargs: dict):
+        return {"role": "user", "content": user_input}
+
+    def __conversation_history(self, history: list, llm_kwargs: dict):
+        messages = []
+        conversation_cnt = len(history) // 2
+        if conversation_cnt:
+            for index in range(0, 2 * conversation_cnt, 2):
+                what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
+                what_gpt_answer = {
+                    "role": "assistant",
+                    "content": history[index + 1]
+                }
+                messages.append(what_i_have_asked)
+                messages.append(what_gpt_answer)
+        return messages
+
+    def generate_chat(self, inputs: str, llm_kwargs: dict, history: list, system_prompt: str):
+        TAICHU_API_KEY = get_conf("TAICHU_API_KEY")
+        params = {
+            'api_key': TAICHU_API_KEY,
+            'model_code': 'taichu_llm',
+            # join past turns and the new input, separated by blank lines
+            'question': '\n\n'.join([*history, inputs]),
+            'prefix': system_prompt,
+            'temperature': llm_kwargs.get('temperature', 0.95),
+            'stream_format': 'json'
+        }
+
+        api = 'https://ai-maas.wair.ac.cn/maas/v1/model_api/invoke'
+        response = requests.post(api, json=params, stream=True)
+        results = ""
+        if response.status_code == 200:
+            response.encoding = 'utf-8'
+            for line in response.iter_lines(decode_unicode=True):
+                if not line: continue  # skip keep-alive blank lines
+                delta = json.loads(line)['choices'][0]['text']
+                results += delta
+                yield delta, results
+        else:
+            raise ValueError(f"Taichu API request failed with status code {response.status_code}")
+
+
+if __name__ == '__main__':
+    taichu = TaichuChatInit()
+    for chunk, response in taichu.generate_chat('你好', {'llm_model': 'taichu_llm'}, [], '你是WPSAi'):
+        print(chunk, end='')
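The wire protocol used by generate_chat above can also be exercised standalone; a minimal sketch against the same endpoint (field names and the line-delimited JSON parsing are taken from the code above; the key is a placeholder, not a real credential):

    import json, requests

    params = {
        "api_key": "<TAICHU_API_KEY>",   # placeholder: substitute your own key
        "model_code": "taichu_llm",
        "question": "你好",
        "prefix": "",                    # system prompt
        "temperature": 0.95,
        "stream_format": "json",         # one JSON object per line
    }
    resp = requests.post("https://ai-maas.wair.ac.cn/maas/v1/model_api/invoke",
                         json=params, stream=True)
    resp.encoding = "utf-8"
    for line in resp.iter_lines(decode_unicode=True):
        if not line:
            continue
        print(json.loads(line)["choices"][0]["text"], end="")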
@@ -14,12 +14,13 @@ validate_path() # validate path so you can run from base directory

-if "在线模型":
+if __name__ == "__main__":
-    from request_llms.bridge_cohere import predict_no_ui_long_connection
+    from request_llms.bridge_taichu import predict_no_ui_long_connection
+    # from request_llms.bridge_cohere import predict_no_ui_long_connection
     # from request_llms.bridge_spark import predict_no_ui_long_connection
     # from request_llms.bridge_zhipu import predict_no_ui_long_connection
     # from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
     llm_kwargs = {
-        "llm_model": "command-r-plus",
+        "llm_model": "taichu",
         "max_length": 4096,
         "top_p": 1,
         "temperature": 1,