From fd93622840cfbf8c0c3c73d34e95dcb8a3c6af87 Mon Sep 17 00:00:00 2001
From: Southlandi <54576834+xxflzj@users.noreply.github.com>
Date: Sat, 28 Dec 2024 23:22:10 +0800
Subject: [PATCH] Fix Gemini conversation error (case where the number of stop
 words is 0) (#2092)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 request_llms/com_google.py | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)

diff --git a/request_llms/com_google.py b/request_llms/com_google.py
index 88e094f5..afb81097 100644
--- a/request_llms/com_google.py
+++ b/request_llms/com_google.py
@@ -202,16 +202,29 @@ class GoogleChatInit:
             )  # process history
         messages.append(self.__conversation_user(inputs, llm_kwargs, enable_multimodal_capacity))  # process the user message
-        payload = {
-            "contents": messages,
-            "generationConfig": {
-                # "maxOutputTokens": llm_kwargs.get("max_token", 1024),
-                "stopSequences": str(llm_kwargs.get("stop", "")).split(" "),
-                "temperature": llm_kwargs.get("temperature", 1),
-                "topP": llm_kwargs.get("top_p", 0.8),
-                "topK": 10,
-            },
-        }
+        stop_sequences = str(llm_kwargs.get("stop", "")).split(" ")
+        # filter out empty strings; if none remain, omit stopSequences from the payload
+        stop_sequences = [s for s in stop_sequences if s]
+        if not stop_sequences:
+            payload = {
+                "contents": messages,
+                "generationConfig": {
+                    "temperature": llm_kwargs.get("temperature", 1),
+                    "topP": llm_kwargs.get("top_p", 0.8),
+                    "topK": 10,
+                },
+            }
+        else:
+            payload = {
+                "contents": messages,
+                "generationConfig": {
+                    # "maxOutputTokens": llm_kwargs.get("max_token", 1024),
+                    "stopSequences": stop_sequences,
+                    "temperature": llm_kwargs.get("temperature", 1),
+                    "topP": llm_kwargs.get("top_p", 0.8),
+                    "topK": 10,
+                },
+            }
         return header, payload
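
Note on the underlying bug: in Python, str("").split(" ") returns [""], a
one-element list containing an empty string, so an unset "stop" value used to
put an empty stop sequence into the Gemini payload, which the API rejects.
Below is a minimal sketch of an equivalent fix that avoids duplicating the
payload dict by building generationConfig once and adding stopSequences only
when non-empty. It assumes the llm_kwargs shape used in
request_llms/com_google.py; build_generation_config is a hypothetical helper
for illustration, not part of the repository.

    # Sketch only: build the config once, conditionally attach stopSequences.
    # build_generation_config is a hypothetical helper, not in com_google.py.
    def build_generation_config(llm_kwargs: dict) -> dict:
        config = {
            # "maxOutputTokens": llm_kwargs.get("max_token", 1024),
            "temperature": llm_kwargs.get("temperature", 1),
            "topP": llm_kwargs.get("top_p", 0.8),
            "topK": 10,
        }
        # "".split(" ") yields [""]; drop empty strings before deciding
        # whether to send the field at all
        stop_sequences = [s for s in str(llm_kwargs.get("stop", "")).split(" ") if s]
        if stop_sequences:
            config["stopSequences"] = stop_sequences
        return config

    if __name__ == "__main__":
        print(build_generation_config({"stop": ""}))          # no stopSequences key
        print(build_generation_config({"stop": "END STOP"}))  # ['END', 'STOP']

This keeps the same behavior as the patch (the key is absent whenever the
filtered list is empty) while touching the payload construction in one place.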