Mirrored from https://github.com/binary-husky/gpt_academic.git
Last synced 2025-12-06 06:26:47 +00:00
Merge branch 'master' into frontier
config.py: 11 lines changed
@@ -32,7 +32,8 @@ else:
 # [step 3]>> 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
 LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
+AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
+                    "gpt-4o", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
 "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
 "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
 "gemini-pro", "chatglm3"
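
The [step 3] comment is the contract to keep in mind when editing this list: LLM_MODEL must itself appear in AVAIL_LLM_MODELS. A minimal illustrative check (not the project's actual validation code; the list here is abbreviated):

# hypothetical startup check mirroring the [step 3] comment above
LLM_MODEL = "gpt-3.5-turbo-16k"
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo"]

if LLM_MODEL not in AVAIL_LLM_MODELS:
    raise ValueError(f"LLM_MODEL '{LLM_MODEL}' must be included in AVAIL_LLM_MODELS")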
@@ -44,7 +45,7 @@ AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-p
 # "spark", "sparkv2", "sparkv3", "sparkv3.5",
 # "qwen-turbo", "qwen-plus", "qwen-max", "qwen-local",
 # "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k",
-# "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125"
+# "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125", "gpt-4o-2024-05-13"
 # "claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229", "claude-2.1", "claude-instant-1.2",
 # "moss", "llama2", "chatglm_onnx", "internlm", "jittorllms_pangualpha", "jittorllms_llama",
 # "deepseek-chat" ,"deepseek-coder",
@@ -104,6 +105,10 @@ TIMEOUT_SECONDS = 30
 WEB_PORT = -1


+# 是否自动打开浏览器页面
+AUTO_OPEN_BROWSER = True
+
+
 # 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
 MAX_RETRY = 2

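
The new AUTO_OPEN_BROWSER switch is consumed in main.py further down (via get_conf). A minimal sketch of the behavior it controls, with the standard-library webbrowser module standing in for the project's open_browser helper:

import threading, time, webbrowser

AUTO_OPEN_BROWSER = True  # the new config switch

def open_browser(port=7860):
    # stand-in for the project's helper: wait briefly, then open the web UI
    time.sleep(2)
    webbrowser.open(f"http://localhost:{port}")

if AUTO_OPEN_BROWSER:
    threading.Thread(target=open_browser, name="open-browser", daemon=True).start()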
@@ -197,7 +202,7 @@ ALIYUN_SECRET="" # (无需填写)


 # GPT-SOVITS 文本转语音服务的运行地址(将语言模型的生成文本朗读出来)
-TTS_TYPE = "DISABLE" # LOCAL / LOCAL_SOVITS_API / DISABLE
+TTS_TYPE = "DISABLE" # EDGE_TTS / LOCAL_SOVITS_API / DISABLE
 GPT_SOVITS_URL = ""
 EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"

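
TTS_TYPE's legal values change from LOCAL / LOCAL_SOVITS_API / DISABLE to EDGE_TTS / LOCAL_SOVITS_API / DISABLE. A hedged sketch of the three-way dispatch this comment implies (the print calls stand in for real backends, and the URL is an illustrative value; the actual routing lives elsewhere in the project):

TTS_TYPE = "EDGE_TTS"  # EDGE_TTS / LOCAL_SOVITS_API / DISABLE
GPT_SOVITS_URL = "http://127.0.0.1:9880"   # illustrative value
EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"

def synthesize(text: str) -> None:
    # hypothetical dispatcher keyed on TTS_TYPE
    if TTS_TYPE == "EDGE_TTS":
        print(f"edge-tts, voice={EDGE_TTS_VOICE}: {text}")
    elif TTS_TYPE == "LOCAL_SOVITS_API":
        print(f"POST {GPT_SOVITS_URL}: {text}")
    # "DISABLE": do nothing

synthesize("你好")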
@@ -33,6 +33,8 @@ def get_core_functions():
         "AutoClearHistory": False,
         # [6] 文本预处理 (可选参数,默认 None,举例:写个函数移除所有的换行符)
         "PreProcess": None,
+        # [7] 模型选择 (可选参数。如不设置,则使用当前全局模型;如设置,则用指定模型覆盖全局模型。)
+        # "ModelOverride": "gpt-3.5-turbo", # 主要用途:强制点击此基础功能按钮时,使用指定的模型。
     },

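
The new [7] option lets a single core-function button pin its own model instead of the global one. A hedged sketch of what one entry returned by get_core_functions() could look like with ModelOverride enabled (the entry name and prompt text are illustrative):

def get_core_functions():
    # illustrative single-entry version of the real registry
    return {
        "学术润色": {
            "Prefix": "Polish the following academic paragraph:\n\n",
            "Suffix": "",
            "AutoClearHistory": False,
            "PreProcess": None,
            # [7] clicking this button always uses gpt-3.5-turbo, whatever the global model is
            "ModelOverride": "gpt-3.5-turbo",
        },
    }

print(get_core_functions()["学术润色"]["ModelOverride"])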
@@ -313,7 +313,7 @@
 "注意": "Attention",
 "以下“红颜色”标识的函数插件需从输入区读取路径作为参数": "The function plugins marked in 'red' below need to read the path from the input area as a parameter",
 "更多函数插件": "More function plugins",
-"打开插件列表": "Open plugin list",
+"点击这里搜索插件列表": "Click Here to Search the Plugin List",
 "高级参数输入区": "Advanced parameter input area",
 "这里是特殊函数插件的高级参数输入区": "Here is the advanced parameter input area for special function plugins",
 "请先从插件列表中选择": "Please select from the plugin list first",
main.py: 9 lines changed
@@ -122,7 +122,7 @@ def main():
     predefined_btns.update({k: functional[k]["Button"]})
 with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
     with gr.Row():
-        gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
+        gr.Markdown("<small>插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)</small>")
     with gr.Row(elem_id="input-plugin-group"):
         plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS,
                                        multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False)
@@ -142,7 +142,7 @@ def main():
     if not plugin.get("AsButton", True): dropdown_fn_list.append(k) # 排除已经是按钮的插件
     elif plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
 with gr.Row():
-    dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False)
+    dropdown = gr.Dropdown(dropdown_fn_list, value=r"点击这里搜索插件列表", label="", show_label=False).style(container=False)
 with gr.Row():
     plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
                                      placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
@@ -314,7 +314,7 @@ def main():
     )
     # 随变按钮的回调函数注册
     def route(request: gr.Request, k, *args, **kwargs):
-        if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
+        if k in [r"点击这里搜索插件列表", r"请先从插件列表中选择"]: return
         yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs)
     click_handle = switchy_bt.click(route,[switchy_bt, *input_combo], output_combo)
     click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [switchy_bt], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
@@ -364,8 +364,9 @@ def main():
     def warm_up_mods(): time.sleep(6); warm_up_modules()

     threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
-    threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
     threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
+    if get_conf('AUTO_OPEN_BROWSER'):
+        threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面

 # 运行一些异步任务:自动更新、打开浏览器页面、预热tiktoken模块
 run_delayed_tasks()
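
The browser thread now starts only when AUTO_OPEN_BROWSER is set; the other delayed tasks are unchanged. A self-contained sketch of this daemon-thread pattern, with stubs replacing the project's auto_updates / open_browser / warm_up_modules:

import threading, time

AUTO_OPEN_BROWSER = True  # would come from get_conf('AUTO_OPEN_BROWSER')

def auto_updates(): pass                       # stub: check for new versions
def open_browser(): time.sleep(2)              # stub: open the web UI
def warm_up_modules(): pass                    # stub: preload tiktoken

def run_delayed_tasks():
    def warm_up_mods(): time.sleep(6); warm_up_modules()
    threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start()
    threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start()
    if AUTO_OPEN_BROWSER:
        threading.Thread(target=open_browser, name="open-browser", daemon=True).start()

run_delayed_tasks()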
@@ -179,6 +179,24 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },

+    "gpt-4o": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 128000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
+
+    "gpt-4o-2024-05-13": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 128000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
+
     "gpt-4-turbo-preview": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
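
Each model_info entry packages both call paths (streaming UI handler and blocking no-UI handler) with endpoint and token-counting metadata, so adding gpt-4o is purely additive. A trimmed, runnable sketch of the registry shape and how the router consumes it (stubs replace the real chatgpt handlers and the tiktoken-based counter):

# stubs standing in for the real chatgpt handlers and the tiktoken counter
def chatgpt_ui(*args, **kwargs): yield "streamed chunk"
def chatgpt_noui(*args, **kwargs): return "full answer"
def get_token_num_gpt4(txt): return max(1, len(txt) // 4)  # crude approximation

openai_endpoint = "https://api.openai.com/v1/chat/completions"

model_info = {
    "gpt-4o": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 128000,
        "tokenizer": None,  # the real entry passes tokenizer_gpt4
        "token_cnt": get_token_num_gpt4,
    },
}

# the router just looks the handler up by model name
method = model_info["gpt-4o"]["fn_without_ui"]
print(method("hello"), model_info["gpt-4o"]["token_cnt"]("hello"))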
@@ -971,6 +989,13 @@ if len(AZURE_CFG_ARRAY) > 0:
         AVAIL_LLM_MODELS += [azure_model_name]


+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+# -=-=-=-=-=-=-=-=-=- ☝️ 以上是模型路由 -=-=-=-=-=-=-=-=-=
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+# -=-=-=-=-=-=-= 👇 以下是多模型路由切换函数 -=-=-=-=-=-=-=
+# -=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=--=-=-=-=-=-=-=-=
+

 def LLM_CATCH_EXCEPTION(f):
@@ -1007,13 +1032,11 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
     model = llm_kwargs['llm_model']
     n_model = 1
     if '&' not in model:
-        # 如果只询问1个大语言模型:
+        # 如果只询问“一个”大语言模型(多数情况):
         method = model_info[model]["fn_without_ui"]
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
-        # 如果同时询问多个大语言模型,这个稍微啰嗦一点,但思路相同,您不必读这个else分支
+        # 如果同时询问“多个”大语言模型,这个稍微啰嗦一点,但思路相同,您不必读这个else分支
         executor = ThreadPoolExecutor(max_workers=4)
         models = model.split('&')
         n_model = len(models)
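
Joining model names with '&' makes predict_no_ui_long_connection fan a single query out to several models on a thread pool and stitch the answers back together. The same fan-out in a standalone sketch (fake_query stands in for each model's fn_without_ui):

from concurrent.futures import ThreadPoolExecutor

def fake_query(model, inputs):
    # stand-in for model_info[model]["fn_without_ui"](...)
    return f"[{model}] answer to: {inputs}"

model = "gpt-4o&gpt-3.5-turbo"              # two models joined with '&'
models = model.split('&')
with ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(fake_query, m, "hello") for m in models]
    return_string_collect = [f.result() for f in futures]

res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
print(res)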
@@ -1066,8 +1089,26 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
         res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
         return res

+
+# 根据基础功能区 ModelOverride 参数调整模型类型,用于 `predict` 中
+import importlib
+import core_functional
+def execute_model_override(llm_kwargs, additional_fn, method):
+    functional = core_functional.get_core_functions()
+    if (additional_fn in functional) and 'ModelOverride' in functional[additional_fn]:
+        # 热更新Prompt & ModelOverride
+        importlib.reload(core_functional)
+        functional = core_functional.get_core_functions()
+        model_override = functional[additional_fn]['ModelOverride']
+        if model_override not in model_info:
+            raise ValueError(f"模型覆盖参数 '{model_override}' 指向一个暂不支持的模型,请检查配置文件。")
+        method = model_info[model_override]["fn_with_ui"]
+        llm_kwargs['llm_model'] = model_override
+        return llm_kwargs, additional_fn, method
+    # 默认返回原参数
+    return llm_kwargs, additional_fn, method
+
-def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
+def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot,
+            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
     """
     发送至LLM,流式获取输出。
     用于基础的对话功能。
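
execute_model_override re-reads core_functional via importlib.reload, so an edited ModelOverride (or prompt) takes effect without restarting the server. A self-contained sketch of that hot-reload pattern, using a throwaway module written to disk (demo_settings is hypothetical):

import importlib, pathlib, sys

sys.path.insert(0, ".")  # make the throwaway module importable from cwd
pathlib.Path("demo_settings.py").write_text("MODEL = 'gpt-3.5-turbo'\n")
import demo_settings
print(demo_settings.MODEL)          # gpt-3.5-turbo

pathlib.Path("demo_settings.py").write_text("MODEL = 'gpt-4o'\n")
importlib.reload(demo_settings)     # pick up the edit without a restart
print(demo_settings.MODEL)          # gpt-4o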
@@ -1086,6 +1127,11 @@ def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
     """

     inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
     method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错,检查config中的AVAIL_LLM_MODELS选项
-    yield from method(inputs, llm_kwargs, *args, **kwargs)
+
+    if additional_fn: # 根据基础功能区 ModelOverride 参数调整模型类型
+        llm_kwargs, additional_fn, method = execute_model_override(llm_kwargs, additional_fn, method)
+
+    yield from method(inputs, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, stream, additional_fn)

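
The *args/**kwargs signature is replaced with explicit parameters so the override can rebind both llm_kwargs and the handler before dispatch. A minimal runnable sketch of the new order of operations (everything here is a stub; "polish" is a hypothetical button name, not one from the repo):

def fake_handler(inputs, llm_kwargs, *rest):
    yield f"[{llm_kwargs['llm_model']}] {inputs}"

model_info = {"gpt-3.5-turbo": {"fn_with_ui": fake_handler},
              "gpt-4o": {"fn_with_ui": fake_handler}}

def predict(inputs, llm_kwargs, plugin_kwargs, chatbot,
            history=[], system_prompt='', stream=True, additional_fn=None):
    method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]
    if additional_fn == "polish":   # stand-in for execute_model_override(...)
        llm_kwargs['llm_model'] = "gpt-4o"
        method = model_info["gpt-4o"]["fn_with_ui"]
    yield from method(inputs, llm_kwargs, plugin_kwargs, chatbot,
                      history, system_prompt, stream, additional_fn)

print(next(predict("hello", {'llm_model': 'gpt-3.5-turbo'}, {}, [], additional_fn="polish")))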
@@ -331,6 +331,9 @@ function addCopyButton(botElement, index, is_last_in_arr) {
     toast_push('正在合成语音 & 自动朗读已开启 (再次点击此按钮可禁用自动朗读)。', 3000);
     // toast_push('正在合成语音', 3000);
     const readText = botElement.innerText;
+    prev_chatbot_index = index;
+    prev_text = readText;
+    prev_text_already_pushed = readText;
     push_text_to_audio(readText);
     setCookie("js_auto_read_cookie", "True", 365);
 }
@@ -1033,7 +1036,7 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {


 function reset_conversation(a, b) {
-    console.log("js_code_reset");
+    // console.log("js_code_reset");
     a = btoa(unescape(encodeURIComponent(JSON.stringify(a))));
     setCookie("js_previous_chat_cookie", a, 1);
     gen_restore_btn();
@@ -1173,7 +1176,7 @@ async function on_plugin_exe_complete(fn_name) {
 // -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 // 第 8 部分: TTS语音生成函数
 // -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+audio_debug = false;
 class AudioPlayer {
     constructor() {
         this.audioCtx = new (window.AudioContext || window.webkitAudioContext)();
@@ -1321,14 +1324,14 @@ function trigger(T, fire) {
 }


-prev_text = "";
-prev_text_already_pushed = "";
+prev_text = ""; // previous text, this is used to check chat changes
+prev_text_already_pushed = ""; // previous text already pushed to audio, this is used to check where we should continue to play audio
 prev_chatbot_index = -1;
 const delay_live_text_update = trigger(3000, on_live_stream_terminate);

 function on_live_stream_terminate(latest_text) {
     // remove `prev_text_already_pushed` from `latest_text`
-    console.log("on_live_stream_terminate", latest_text)
+    if (audio_debug) console.log("on_live_stream_terminate", latest_text);
     remaining_text = latest_text.slice(prev_text_already_pushed.length);
     if ((!isEmptyOrWhitespaceOnly(remaining_text)) && remaining_text.length != 0) {
         prev_text_already_pushed = latest_text;
@@ -1393,19 +1396,19 @@ function process_latest_text_output(text, chatbot_index) {
     delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
 }
 else if (chatbot_index == prev_chatbot_index && !is_continue) {
-    console.log('---------------------')
-    console.log('text twisting!')
-    console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed)
-    console.log('---------------------')
+    if (audio_debug) console.log('---------------------');
+    if (audio_debug) console.log('text twisting!');
+    if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
+    if (audio_debug) console.log('---------------------');
     prev_text_already_pushed = "";
     delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
 }
 else {
     // on_new_message_begin, we have to clear `prev_text_already_pushed`
-    console.log('---------------------')
-    console.log('new message begin!')
-    console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed)
-    console.log('---------------------')
+    if (audio_debug) console.log('---------------------');
+    if (audio_debug) console.log('new message begin!');
+    if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
+    if (audio_debug) console.log('---------------------');
     prev_text_already_pushed = "";
     process_increased_text(text);
     delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
@@ -1433,7 +1436,7 @@ async function push_text_to_audio(text) {
     // Call the async postData function and log the response
     post_text(url, payload, send_index);
     send_index = send_index + 1;
-    console.log(send_index, audio_buf_text)
+    if (audio_debug) console.log(send_index, audio_buf_text);
     // sleep 2 seconds
     if (allow_auto_read_tts_flag) {
         await delay(3000);
@@ -1450,10 +1453,10 @@ to_be_processed = [];
 async function UpdatePlayQueue(cnt, audio_buf_wave) {
     if (cnt != recv_index) {
         to_be_processed.push([cnt, audio_buf_wave]);
-        console.log('cache', cnt);
+        if (audio_debug) console.log('cache', cnt);
     }
     else {
-        console.log('processing', cnt);
+        if (audio_debug) console.log('processing', cnt);
         recv_index = recv_index + 1;
         if (audio_buf_wave) {
             audioPlayer.enqueueAudio(audio_buf_wave);
@@ -1463,7 +1466,7 @@ async function UpdatePlayQueue(cnt, audio_buf_wave) {
 find_any = false;
 for (i = to_be_processed.length - 1; i >= 0; i--) {
     if (to_be_processed[i][0] == recv_index) {
-        console.log('processing cached', recv_index);
+        if (audio_debug) console.log('processing cached', recv_index);
         if (to_be_processed[i][1]) {
             audioPlayer.enqueueAudio(to_be_processed[i][1]);
         }
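
UpdatePlayQueue enforces in-order playback even though TTS chunks can return out of order: a chunk whose index is not the next expected one is cached in to_be_processed, and every in-order arrival drains whatever cached chunks have become playable. The same reordering buffer as a standalone Python sketch (enqueue_audio stands in for audioPlayer.enqueueAudio):

recv_index = 0        # next chunk index we are allowed to play
to_be_processed = []  # (index, chunk) pairs that arrived early

def enqueue_audio(chunk):
    print("playing", chunk)  # stand-in for audioPlayer.enqueueAudio

def update_play_queue(cnt, chunk):
    global recv_index
    if cnt != recv_index:
        to_be_processed.append((cnt, chunk))   # cache: arrived out of order
        return
    enqueue_audio(chunk)
    recv_index += 1
    # drain cached chunks that are now in order
    found_any = True
    while found_any:
        found_any = False
        for i, (idx, buf) in enumerate(to_be_processed):
            if idx == recv_index:
                enqueue_audio(buf)
                to_be_processed.pop(i)
                recv_index += 1
                found_any = True
                break

for cnt, chunk in [(1, "b"), (0, "a"), (2, "c")]:  # out-of-order arrival
    update_play_queue(cnt, chunk)                  # plays a, b, c in order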