binary-husky
2024-02-25 22:16:46 +08:00
Parent 47289f863d
Commit d0703ef32d
96 changed files with 7507 additions and 2453 deletions

View File

@@ -11,7 +11,7 @@
import tiktoken, copy
from functools import lru_cache
from concurrent.futures import ThreadPoolExecutor
-from toolbox import get_conf, trimmed_format_exc
+from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from .bridge_chatgpt import predict as chatgpt_ui
@@ -31,6 +31,9 @@ from .bridge_qianfan import predict as qianfan_ui
from .bridge_google_gemini import predict as genai_ui
from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
from .bridge_zhipu import predict as zhipu_ui
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
class LazyloadTiktoken(object):
@@ -44,13 +47,13 @@ class LazyloadTiktoken(object):
tmp = tiktoken.encoding_for_model(model)
print('加载tokenizer完毕')
return tmp
def encode(self, *args, **kwargs):
encoder = self.get_encoder(self.model)
return encoder.encode(*args, **kwargs)
def decode(self, *args, **kwargs):
encoder = self.get_encoder(self.model)
return encoder.decode(*args, **kwargs)
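
The class above defers the tiktoken download/initialization until the first encode/decode call, and lru_cache (imported at the top of this file) presumably memoizes get_encoder so each model's encoder is built only once. A minimal usage sketch, assuming the constructor just stores the model name:

tokenizer = LazyloadTiktoken("gpt-3.5-turbo")     # cheap: nothing loaded yet
n_tokens = len(tokenizer.encode("hello world"))   # first call triggers the real load
n_tokens = len(tokenizer.encode("hello again"))   # served from the cached encoder
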
# Endpoint redirection
@@ -63,7 +66,7 @@ azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/compl
# Compatibility with legacy config
try:
API_URL = get_conf("API_URL")
if API_URL != "https://api.openai.com/v1/chat/completions":
openai_endpoint = API_URL
print("警告API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
except:
@@ -95,7 +98,7 @@ model_info = {
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gpt-3.5-turbo-16k": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -150,6 +153,15 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
"gpt-4-turbo-preview": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-4-1106-preview": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -159,6 +171,15 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
"gpt-4-0125-preview": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": openai_endpoint,
"max_token": 128000,
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-3.5-random": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -167,7 +188,7 @@ model_info = {
"tokenizer": tokenizer_gpt4,
"token_cnt": get_token_num_gpt4,
},
"gpt-4-vision-preview": {
"fn_with_ui": chatgpt_vision_ui,
"fn_without_ui": chatgpt_vision_noui,
@@ -197,16 +218,25 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
# api_2d (no need to add new api2d entries here; the code below adds them automatically)
"api2d-gpt-3.5-turbo": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
"endpoint": api2d_endpoint,
"max_token": 4096,
# Zhipu AI
"glm-4": {
"fn_with_ui": zhipu_ui,
"fn_without_ui": zhipu_noui,
"endpoint": None,
"max_token": 10124 * 8,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"glm-3-turbo": {
"fn_with_ui": zhipu_ui,
"fn_without_ui": zhipu_noui,
"endpoint": None,
"max_token": 10124 * 4,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
# api_2d (no need to add new api2d entries here; the code below adds them automatically)
"api2d-gpt-4": {
"fn_with_ui": chatgpt_ui,
"fn_without_ui": chatgpt_noui,
@@ -530,7 +560,7 @@ if "sparkv2" in AVAIL_LLM_MODELS: # iFLYTEK Spark LLM
})
except:
print(trimmed_format_exc())
if "sparkv3" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
if "sparkv3" in AVAIL_LLM_MODELS or "sparkv3.5" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
try:
from .bridge_spark import predict_no_ui_long_connection as spark_noui
from .bridge_spark import predict as spark_ui
@@ -542,6 +572,14 @@ if "sparkv3" in AVAIL_LLM_MODELS: # iFLYTEK Spark LLM
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"sparkv3.5": {
"fn_with_ui": spark_ui,
"fn_without_ui": spark_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
})
except:
@@ -562,19 +600,17 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2
})
except:
print(trimmed_format_exc())
if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai
if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容配置
try:
from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
from .bridge_zhipu import predict as zhipu_ui
model_info.update({
"zhipuai": {
"fn_with_ui": zhipu_ui,
"fn_without_ui": zhipu_noui,
"endpoint": None,
"max_token": 4096,
"max_token": 10124 * 8,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
-}
+},
})
except:
print(trimmed_format_exc())
@@ -617,7 +653,7 @@ AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
if len(AZURE_CFG_ARRAY) > 0:
for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items():
# May override an earlier config entry, but that is expected
if not azure_model_name.startswith('azure'):
raise ValueError("AZURE_CFG_ARRAY中配置的模型必须以azure开头")
endpoint_ = azure_cfg_dict["AZURE_ENDPOINT"] + \
f'openai/deployments/{azure_cfg_dict["AZURE_ENGINE"]}/chat/completions?api-version=2023-05-15'
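
Each AZURE_CFG_ARRAY entry is expanded into a full chat/completions endpoint by the f-string above; with a hypothetical entry the result looks like this:

azure_cfg_dict = {"AZURE_ENDPOINT": "https://my-res.openai.azure.com/",   # hypothetical values
                  "AZURE_ENGINE": "gpt35-deploy"}
endpoint_ = azure_cfg_dict["AZURE_ENDPOINT"] + \
    f'openai/deployments/{azure_cfg_dict["AZURE_ENGINE"]}/chat/completions?api-version=2023-05-15'
# -> https://my-res.openai.azure.com/openai/deployments/gpt35-deploy/chat/completions?api-version=2023-05-15
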
@@ -668,6 +704,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
"""
import threading, time, copy
inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
model = llm_kwargs['llm_model']
n_model = 1
if '&' not in model:
@@ -682,7 +719,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
executor = ThreadPoolExecutor(max_workers=4)
models = model.split('&')
n_model = len(models)
window_len = len(observe_window)
assert window_len==3
window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
@@ -701,7 +738,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
time.sleep(0.25)
if not window_mutex[-1]: break
# watchdog
for i in range(n_model):
window_mutex[i][1] = observe_window[1]
# observation window
chat_string = []
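
When llm_model contains '&' (e.g. "gpt-3.5-turbo&glm-4"), the hunk above fans the same question out to all listed models in parallel, giving each one a private [response, heartbeat, flag] slot in window_mutex. A simplified sketch of just the fan-out, with ask_one standing in for the real per-model worker:

from concurrent.futures import ThreadPoolExecutor

def fan_out(model_string, ask_one):
    # ask_one(model) -> str is a stand-in for the per-model request function
    models = model_string.split('&')
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(ask_one, m) for m in models]
        return {m: f.result() for m, f in zip(models, futures)}
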
@@ -741,6 +778,7 @@ def predict(inputs, llm_kwargs, *args, **kwargs):
additional_fn indicates which button was clicked; see functional.py for the buttons
"""
inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # if this raises an error, check the AVAIL_LLM_MODELS option in config
yield from method(inputs, llm_kwargs, *args, **kwargs)
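
predict is a thin dispatcher: the model name selects an entry in model_info and the call is forwarded to that entry's "fn_with_ui" handler, so supporting a new model is mostly a matter of registering one more dict entry. A minimal sketch of the lookup, assuming a registry shaped like the entries above:

def dispatch_ui(model_info, llm_kwargs, inputs, *args, **kwargs):
    entry = model_info[llm_kwargs['llm_model']]   # KeyError here means the model was never registered
    yield from entry["fn_with_ui"](inputs, llm_kwargs, *args, **kwargs)
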

View File

@@ -113,6 +113,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
error_msg = get_full_error(chunk, stream_response).decode()
if "reduce the length" in error_msg:
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
elif """type":"upstream_error","param":"307""" in error_msg:
raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
else:
raise RuntimeError("OpenAI拒绝了请求" + error_msg)
if ('data: [DONE]' in chunk_decoded): break # api2d 正常完成
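
The new elif gives api2d's truncation failure its own, clearer message. The check is a plain substring match on the raw error body, so a payload like the hypothetical one below would take the new branch instead of the generic RuntimeError:

sample_error = '{"type":"upstream_error","param":"307","message":"..."}'   # hypothetical payload
assert '''type":"upstream_error","param":"307''' in sample_error           # matches the new elif
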

View File

@@ -57,6 +57,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if "vision" in llm_kwargs["llm_model"]:
have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
if not have_recent_file:
chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写"))
yield from update_ui(chatbot=chatbot, history=history, msg="等待图片") # refresh the UI
return
def make_media_input(inputs, image_paths):
for image_path in image_paths:
inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
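
make_media_input inlines every recently uploaded image into the prompt as a centered HTML <img> tag pointing at the local file, e.g. (path illustrative):

make_media_input("描述这张图片", ["/tmp/upload/cat.jpg"])
# -> '描述这张图片<br/><br/><div align="center"><img src="file=/tmp/upload/cat.jpg"></div>'
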

View File

@@ -146,21 +146,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
yield from update_ui(chatbot=chatbot, history=history)
# Start receiving the reply
try:
response = f"[Local Message] 等待{model_name}响应中 ..."
for response in generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
history.extend([inputs, response])
yield from update_ui(chatbot=chatbot, history=history)
except ConnectionAbortedError as e:
from .bridge_all import model_info
if len(history) >= 2: history[-1] = ""; history[-2] = "" # clear the overflowing round: history[-2] is this input, history[-1] this output
history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # release at least half of the history
chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
yield from update_ui(chatbot=chatbot, history=history, msg="异常") # refresh the UI
return
# Summarize the output
response = f"[Local Message] {model_name}响应异常 ..."
if response == f"[Local Message] 等待{model_name}响应中 ...":
response = f"[Local Message] {model_name}响应异常 ..."
history.extend([inputs, response])
yield from update_ui(chatbot=chatbot, history=history)
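
On ConnectionAbortedError the handler blanks the offending round, then re-clips the history to the model's token budget before inviting a retry. The real clip_history lives in toolbox and counts tokens with the model's tokenizer; a rough sketch of the idea:

def clip_history_sketch(inputs, history, count_tokens, max_token_limit):
    # drop the oldest (input, output) rounds until what remains fits the budget
    while history and count_tokens(inputs + "".join(history)) > max_token_limit:
        history = history[2:]
    return history
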

View File

@@ -51,6 +51,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
# Start receiving the reply
from .com_qwenapi import QwenRequestInstance
sri = QwenRequestInstance()
response = f"[Local Message] 等待{model_name}响应中 ..."
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)

View File

@@ -56,6 +56,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
# Start receiving the reply
from .com_skylark2api import YUNQUERequestInstance
sri = YUNQUERequestInstance()
response = f"[Local Message] 等待{model_name}响应中 ..."
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)

View File

@@ -9,7 +9,7 @@ model_name = '星火认知大模型'
def validate_key():
XFYUN_APPID = get_conf('XFYUN_APPID')
if XFYUN_APPID == '00000000' or XFYUN_APPID == '':
return False
return True
@@ -49,9 +49,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
# Start receiving the reply
from .com_sparkapi import SparkRequestInstance
sri = SparkRequestInstance()
response = f"[Local Message] 等待{model_name}响应中 ..."
for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)

View File

@@ -1,15 +1,21 @@
import time
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg
-from toolbox import check_packages, report_exception
+from toolbox import check_packages, report_exception, have_any_recent_upload_image_files
model_name = '智谱AI大模型'
zhipuai_default_model = 'glm-4'
def validate_key():
ZHIPUAI_API_KEY = get_conf("ZHIPUAI_API_KEY")
if ZHIPUAI_API_KEY == '': return False
return True
def make_media_input(inputs, image_paths):
for image_path in image_paths:
inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
return inputs
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
"""
⭐ Multi-threaded method
@@ -18,34 +24,40 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
watch_dog_patience = 5
response = ""
if llm_kwargs["llm_model"] == "zhipuai":
llm_kwargs["llm_model"] = zhipuai_default_model
if validate_key() is False:
raise RuntimeError('请配置ZHIPUAI_API_KEY')
-from .com_zhipuapi import ZhipuRequestInstance
-sri = ZhipuRequestInstance()
-for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
+# Start receiving the reply
+from .com_zhipuglm import ZhipuChatInit
+zhipu_bro_init = ZhipuChatInit()
+for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, sys_prompt):
if len(observe_window) >= 1:
observe_window[0] = response
if len(observe_window) >= 2:
-if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
+if (time.time() - observe_window[1]) > watch_dog_patience:
+raise RuntimeError("程序终止。")
return response
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
"""
⭐ Single-threaded method
See request_llms/bridge_all.py for this function's documentation
"""
-chatbot.append((inputs, ""))
+chatbot.append([inputs, ""])
yield from update_ui(chatbot=chatbot, history=history)
# Try importing the dependency; if it is missing, suggest how to install it
try:
check_packages(["zhipuai"])
except:
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install zhipuai==1.0.7```。",
chatbot=chatbot, history=history, delay=0)
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
chatbot=chatbot, history=history, delay=0)
return
if validate_key() is False:
yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0)
return
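
The observe_window protocol used above: slot 0 carries the partial reply back to the caller, slot 1 is a heartbeat that the caller must keep refreshing; once it goes stale for more than watch_dog_patience seconds the worker thread aborts itself. Sketched from both sides:

import time
observe_window = ["", time.time()]
# caller side, while waiting on the worker thread:
observe_window[1] = time.time()                 # refresh the heartbeat, or the worker raises
# worker side, per streamed chunk (watch_dog_patience = 5):
if (time.time() - observe_window[1]) > 5:
    raise RuntimeError("程序终止。")
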
@@ -53,16 +65,29 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
# Start receiving the reply
from .com_zhipuapi import ZhipuRequestInstance
sri = ZhipuRequestInstance()
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
chatbot[-1] = (inputs, response)
chatbot[-1] = [inputs, ""]
yield from update_ui(chatbot=chatbot, history=history)
# Summarize the output
if response == f"[Local Message] 等待{model_name}响应中 ...":
response = f"[Local Message] {model_name}响应异常 ..."
if llm_kwargs["llm_model"] == "zhipuai":
llm_kwargs["llm_model"] = zhipuai_default_model
if llm_kwargs["llm_model"] in ["glm-4v"]:
have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
if not have_recent_file:
chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写"))
yield from update_ui(chatbot=chatbot, history=history, msg="等待图片") # refresh the UI
return
if have_recent_file:
inputs = make_media_input(inputs, image_paths)
chatbot[-1] = [inputs, ""]
yield from update_ui(chatbot=chatbot, history=history)
# Start receiving the reply
from .com_zhipuglm import ZhipuChatInit
zhipu_bro_init = ZhipuChatInit()
for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, system_prompt):
chatbot[-1] = [inputs, response]
yield from update_ui(chatbot=chatbot, history=history)
history.extend([inputs, response])
yield from update_ui(chatbot=chatbot, history=history)

View File

@@ -7,7 +7,7 @@ import os
import re
import requests
from typing import List, Dict, Tuple
-from toolbox import get_conf, encode_image, get_pictures_list
+from toolbox import get_conf, encode_image, get_pictures_list, to_markdown_tabs
proxies, TIMEOUT_SECONDS = get_conf("proxies", "TIMEOUT_SECONDS")
@@ -112,34 +112,6 @@ def html_local_img(__file, layout="left", max_width=None, max_height=None, md=Tr
return a
def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
"""
Args:
head: table header, e.g. []
tabs: table values, e.g. [[col1], [col2], [col3], [col4]]
alignment: ":---" left-aligned, ":---:" centered, "---:" right-aligned
column: True to keep data in columns, False to keep data in rows (default).
Returns:
A string representation of the markdown table.
"""
if column:
transposed_tabs = list(map(list, zip(*tabs)))
else:
transposed_tabs = tabs
# Find the maximum length among the columns
max_len = max(len(column) for column in transposed_tabs)
tab_format = "| %s "
tabs_list = "".join([tab_format % i for i in head]) + "|\n"
tabs_list += "".join([tab_format % alignment for i in head]) + "|\n"
for i in range(max_len):
row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
row_data = file_manifest_filter_html(row_data, filter_=None)
tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"
return tabs_list
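
The removed helper now lives in toolbox (see the updated import at the top of this file). With the default column=False the tabs argument is a list of columns; assuming file_manifest_filter_html passes plain strings through unchanged, a call like this renders:

to_markdown_tabs(head=["name", "size"], tabs=[["a.txt", "b.txt"], ["1KB", "2KB"]])
# | name | size |
# | :---: | :---: |
# | a.txt | 1KB |
# | b.txt | 2KB |
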
class GoogleChatInit:
def __init__(self):

View File

@@ -65,6 +65,7 @@ class SparkRequestInstance():
self.gpt_url = "ws://spark-api.xf-yun.com/v1.1/chat"
self.gpt_url_v2 = "ws://spark-api.xf-yun.com/v2.1/chat"
self.gpt_url_v3 = "ws://spark-api.xf-yun.com/v3.1/chat"
self.gpt_url_v35 = "wss://spark-api.xf-yun.com/v3.5/chat"
self.gpt_url_img = "wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image"
self.time_to_yield_event = threading.Event()
@@ -91,6 +92,8 @@ class SparkRequestInstance():
gpt_url = self.gpt_url_v2
elif llm_kwargs['llm_model'] == 'sparkv3':
gpt_url = self.gpt_url_v3
elif llm_kwargs['llm_model'] == 'sparkv3.5':
gpt_url = self.gpt_url_v35
else:
gpt_url = self.gpt_url
file_manifest = []
@@ -190,6 +193,7 @@ def gen_params(appid, inputs, llm_kwargs, history, system_prompt, file_manifest)
"spark": "general",
"sparkv2": "generalv2",
"sparkv3": "generalv3",
"sparkv3.5": "generalv3.5",
}
domains_select = domains[llm_kwargs['llm_model']]
if file_manifest: domains_select = 'image'
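
After this commit the Spark bridge derives both the websocket endpoint and the API "domain" from the model name; collecting the values from the two hunks above:

SPARK_ROUTES = {   # model -> (websocket URL, domain), values copied from the code above
    "spark":     ("ws://spark-api.xf-yun.com/v1.1/chat",  "general"),
    "sparkv2":   ("ws://spark-api.xf-yun.com/v2.1/chat",  "generalv2"),
    "sparkv3":   ("ws://spark-api.xf-yun.com/v3.1/chat",  "generalv3"),
    "sparkv3.5": ("wss://spark-api.xf-yun.com/v3.5/chat", "generalv3.5"),
}
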

View File

@@ -0,0 +1,84 @@
# encoding: utf-8
# @Time : 2024/1/22
# @Author : Kilig947 & binary husky
# @Descr : Compatibility with the latest Zhipu AI
from toolbox import get_conf
from zhipuai import ZhipuAI
from toolbox import get_conf, encode_image, get_pictures_list
import logging, os
def input_encode_handler(inputs, llm_kwargs):
if llm_kwargs["most_recent_uploaded"].get("path"):
image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"])
md_encode = []
for md_path in image_paths:
type_ = os.path.splitext(md_path)[1].replace(".", "")
type_ = "jpeg" if type_ == "jpg" else type_
md_encode.append({"data": encode_image(md_path), "type": type_})
return inputs, md_encode
class ZhipuChatInit:
def __init__(self):
ZHIPUAI_API_KEY, ZHIPUAI_MODEL = get_conf("ZHIPUAI_API_KEY", "ZHIPUAI_MODEL")
if len(ZHIPUAI_MODEL) > 0:
logging.error('ZHIPUAI_MODEL 配置项选项已经弃用,请在LLM_MODEL中配置')
self.zhipu_bro = ZhipuAI(api_key=ZHIPUAI_API_KEY)
self.model = ''
def __conversation_user(self, user_input: str, llm_kwargs):
if self.model not in ["glm-4v"]:
return {"role": "user", "content": user_input}
else:
input_, encode_img = input_encode_handler(user_input, llm_kwargs=llm_kwargs)
what_i_have_asked = {"role": "user", "content": []}
what_i_have_asked['content'].append({"type": 'text', "text": user_input})
if encode_img:
img_d = {"type": "image_url",
"image_url": {'url': encode_img}}
what_i_have_asked['content'].append(img_d)
return what_i_have_asked
def __conversation_history(self, history, llm_kwargs):
messages = []
conversation_cnt = len(history) // 2
if conversation_cnt:
for index in range(0, 2 * conversation_cnt, 2):
what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
what_gpt_answer = {
"role": "assistant",
"content": history[index + 1]
}
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
return messages
def __conversation_message_payload(self, inputs, llm_kwargs, history, system_prompt):
messages = []
if system_prompt:
messages.append({"role": "system", "content": system_prompt})
self.model = llm_kwargs['llm_model']
messages.extend(self.__conversation_history(history, llm_kwargs)) # process history
messages.append(self.__conversation_user(inputs, llm_kwargs)) # process the current user message
response = self.zhipu_bro.chat.completions.create(
model=self.model, messages=messages, stream=True,
temperature=llm_kwargs.get('temperature', 0.95) * 0.95, # only the default temperature and top_p can be passed through
top_p=llm_kwargs.get('top_p', 0.7) * 0.7,
max_tokens=llm_kwargs.get('max_tokens', 1024 * 4), # cap output at half of the model maximum
)
return response
def generate_chat(self, inputs, llm_kwargs, history, system_prompt):
self.model = llm_kwargs['llm_model']
response = self.__conversation_message_payload(inputs, llm_kwargs, history, system_prompt)
bro_results = ''
for chunk in response:
bro_results += chunk.choices[0].delta.content
yield chunk.choices[0].delta.content, bro_results
if __name__ == '__main__':
zhipu = ZhipuChatInit()
zhipu.generate_chat('你好', {'llm_model': 'glm-4'}, [], '你是WPSAi')

request_llms/moss (submodule)

Submodule request_llms/moss added at commit 4d905bcead
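For existing checkouts, pulling this commit therefore also requires initializing the new submodule, e.g. with the standard command "git submodule update --init request_llms/moss".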