From 6da56238137231ff081b9dde0bec34b3bddb11b4 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Mon, 15 Jul 2024 04:23:43 +0000 Subject: [PATCH] =?UTF-8?q?=E5=A4=9A=E7=94=A8=E9=80=94=E5=A4=8D=E7=94=A8?= =?UTF-8?q?=E6=8F=90=E4=BA=A4=E6=8C=89=E9=92=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- main.py | 16 +- themes/common.js | 431 ++++++----------------------------------------- themes/common.py | 1 + themes/tts.js | 351 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 415 insertions(+), 384 deletions(-) create mode 100644 themes/tts.js diff --git a/main.py b/main.py index 3feebbb2..16aec818 100644 --- a/main.py +++ b/main.py @@ -112,10 +112,12 @@ def main(): with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary: with gr.Row(): txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False) - txt.submit(None, None, None, _js="""click_real_submit_btn""") with gr.Row(): - advanced_submit_btn = gr.Button("提交", elem_id="elem_submit_fake_1", variant="primary") - advanced_submit_btn.click(None, None, None, _js="""click_real_submit_btn""") + multiplex_submit_btn = gr.Button("提交", elem_id="elem_submit_visible", variant="primary") + multiplex_sel = gr.Dropdown( + choices=["常规对话", "多模型对话", "智能上下文", "智能召回 RAG"], value="常规对话", + interactive=True, label='', show_label=False, + elem_classes='normal_mut_select').style(container=False) submit_btn = gr.Button("提交", elem_id="elem_submit", variant="primary", visible=False) with gr.Row(): resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm") @@ -212,7 +214,14 @@ def main(): input_combo_order = ["cookies", "max_length_sl", "md_dropdown", "txt", "txt2", "top_p", "temperature", "chatbot", "history", "system_prompt", "plugin_advanced_arg"] output_combo = [cookies, chatbot, history, status] predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo) + # 提交按钮、重置按钮 + multiplex_submit_btn.click( + None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""") + txt.submit( + None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""") + multiplex_sel.select( + None, [multiplex_sel], None, _js=f"""(multiplex_sel)=>run_multiplex_shift(multiplex_sel)""") cancel_handles.append(submit_btn.click(**predict_args)) resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status @@ -235,7 +244,6 @@ def main(): file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}") file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}") # 函数插件-固定按钮区 - for k in plugins: register_advanced_plugin_init_arr += f"""register_plugin_init("{k}","{encode_plugin_info(k, plugins[k])}");""" if plugins[k].get("Class", None): diff --git a/themes/common.js b/themes/common.js index af48b375..c8df1bff 100644 --- a/themes/common.js +++ b/themes/common.js @@ -1050,364 +1050,6 @@ async function on_plugin_exe_complete(fn_name) { } - - - - - - -// 
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -// 第 8 部分: TTS语音生成函数 -// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -audio_debug = false; -class AudioPlayer { - constructor() { - this.audioCtx = new (window.AudioContext || window.webkitAudioContext)(); - this.queue = []; - this.isPlaying = false; - this.currentSource = null; // 添加属性来保存当前播放的源 - } - - // Base64 编码的字符串转换为 ArrayBuffer - base64ToArrayBuffer(base64) { - const binaryString = window.atob(base64); - const len = binaryString.length; - const bytes = new Uint8Array(len); - for (let i = 0; i < len; i++) { - bytes[i] = binaryString.charCodeAt(i); - } - return bytes.buffer; - } - - // 检查音频播放队列并播放音频 - checkQueue() { - if (!this.isPlaying && this.queue.length > 0) { - this.isPlaying = true; - const nextAudio = this.queue.shift(); - this.play_wave(nextAudio); - } - } - - // 将音频添加到播放队列 - enqueueAudio(audio_buf_wave) { - if (allow_auto_read_tts_flag) { - this.queue.push(audio_buf_wave); - this.checkQueue(); - } - } - - // 播放音频 - async play_wave(encodedAudio) { - //const audioData = this.base64ToArrayBuffer(encodedAudio); - const audioData = encodedAudio; - try { - const buffer = await this.audioCtx.decodeAudioData(audioData); - const source = this.audioCtx.createBufferSource(); - source.buffer = buffer; - source.connect(this.audioCtx.destination); - source.onended = () => { - if (allow_auto_read_tts_flag) { - this.isPlaying = false; - this.currentSource = null; // 播放结束后清空当前源 - this.checkQueue(); - } - }; - this.currentSource = source; // 保存当前播放的源 - source.start(); - } catch (e) { - console.log("Audio error!", e); - this.isPlaying = false; - this.currentSource = null; // 出错时也应清空当前源 - this.checkQueue(); - } - } - - // 新增:立即停止播放音频的方法 - stop() { - if (this.currentSource) { - this.queue = []; // 清空队列 - this.currentSource.stop(); // 停止当前源 - this.currentSource = null; // 清空当前源 - this.isPlaying = false; // 更新播放状态 - // 关闭音频上下文可能会导致无法再次播放音频,因此仅停止当前源 - // this.audioCtx.close(); // 可选:如果需要可以关闭音频上下文 - } - } -} - -const audioPlayer = new AudioPlayer(); - -class FIFOLock { - constructor() { - this.queue = []; - this.currentTaskExecuting = false; - } - - lock() { - let resolveLock; - const lock = new Promise(resolve => { - resolveLock = resolve; - }); - - this.queue.push(resolveLock); - - if (!this.currentTaskExecuting) { - this._dequeueNext(); - } - - return lock; - } - - _dequeueNext() { - if (this.queue.length === 0) { - this.currentTaskExecuting = false; - return; - } - this.currentTaskExecuting = true; - const resolveLock = this.queue.shift(); - resolveLock(); - } - - unlock() { - this.currentTaskExecuting = false; - this._dequeueNext(); - } -} - - - - - - - - -function delay(ms) { - return new Promise(resolve => setTimeout(resolve, ms)); -} - -// Define the trigger function with delay parameter T in milliseconds -function trigger(T, fire) { - // Variable to keep track of the timer ID - let timeoutID = null; - // Variable to store the latest arguments - let lastArgs = null; - - return function (...args) { - // Update lastArgs with the latest arguments - lastArgs = args; - // Clear the existing timer if the function is called again - if (timeoutID !== null) { - clearTimeout(timeoutID); - } - // Set a new timer that calls the `fire` function with the latest arguments after T milliseconds - timeoutID = setTimeout(() => { - fire(...lastArgs); - }, T); - }; -} - - -prev_text = ""; // previous text, this is used to check chat changes -prev_text_already_pushed = ""; // previous text already pushed to audio, this is used to check 
where we should continue to play audio -prev_chatbot_index = -1; -const delay_live_text_update = trigger(3000, on_live_stream_terminate); - -function on_live_stream_terminate(latest_text) { - // remove `prev_text_already_pushed` from `latest_text` - if (audio_debug) console.log("on_live_stream_terminate", latest_text); - remaining_text = latest_text.slice(prev_text_already_pushed.length); - if ((!isEmptyOrWhitespaceOnly(remaining_text)) && remaining_text.length != 0) { - prev_text_already_pushed = latest_text; - push_text_to_audio(remaining_text); - } -} -function is_continue_from_prev(text, prev_text) { - abl = 5 - if (text.length < prev_text.length - abl) { - return false; - } - if (prev_text.length > 10) { - return text.startsWith(prev_text.slice(0, Math.min(prev_text.length - abl, 100))); - } else { - return text.startsWith(prev_text); - } -} -function isEmptyOrWhitespaceOnly(remaining_text) { - // Replace \n and 。 with empty strings - let textWithoutSpecifiedCharacters = remaining_text.replace(/[\n。]/g, ''); - // Check if the remaining string is empty - return textWithoutSpecifiedCharacters.trim().length === 0; -} -function process_increased_text(remaining_text) { - // console.log('[is continue], remaining_text: ', remaining_text) - // remaining_text starts with \n or 。, then move these chars into prev_text_already_pushed - while (remaining_text.startsWith('\n') || remaining_text.startsWith('。')) { - prev_text_already_pushed = prev_text_already_pushed + remaining_text[0]; - remaining_text = remaining_text.slice(1); - } - if (remaining_text.includes('\n') || remaining_text.includes('。')) { // determine remaining_text contain \n or 。 - // new message begin! - index_of_last_sep = Math.max(remaining_text.lastIndexOf('\n'), remaining_text.lastIndexOf('。')); - // break the text into two parts - tobe_pushed = remaining_text.slice(0, index_of_last_sep + 1); - prev_text_already_pushed = prev_text_already_pushed + tobe_pushed; - // console.log('[is continue], push: ', tobe_pushed) - // console.log('[is continue], update prev_text_already_pushed: ', prev_text_already_pushed) - if (!isEmptyOrWhitespaceOnly(tobe_pushed)) { - // console.log('[is continue], remaining_text is empty') - push_text_to_audio(tobe_pushed); - } - } -} -function process_latest_text_output(text, chatbot_index) { - if (text.length == 0) { - prev_text = text; - prev_text_mask = text; - // console.log('empty text') - return; - } - if (text == prev_text) { - // console.log('[nothing changed]') - return; - } - - var is_continue = is_continue_from_prev(text, prev_text_already_pushed); - if (chatbot_index == prev_chatbot_index && is_continue) { - // on_text_continue_grow - remaining_text = text.slice(prev_text_already_pushed.length); - process_increased_text(remaining_text); - delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit - } - else if (chatbot_index == prev_chatbot_index && !is_continue) { - if (audio_debug) console.log('---------------------'); - if (audio_debug) console.log('text twisting!'); - if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed); - if (audio_debug) console.log('---------------------'); - prev_text_already_pushed = ""; - delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit - } - else { - // on_new_message_begin, we have to clear `prev_text_already_pushed` - if (audio_debug) console.log('---------------------'); - if (audio_debug) console.log('new 
message begin!'); - if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed); - if (audio_debug) console.log('---------------------'); - prev_text_already_pushed = ""; - process_increased_text(text); - delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit - } - prev_text = text; - prev_chatbot_index = chatbot_index; -} - -const audio_push_lock = new FIFOLock(); -async function push_text_to_audio(text) { - if (!allow_auto_read_tts_flag) { - return; - } - await audio_push_lock.lock(); - var lines = text.split(/[\n。]/); - for (const audio_buf_text of lines) { - if (audio_buf_text) { - // Append '/vits' to the current URL to form the target endpoint - const url = `${window.location.href}vits`; - // Define the payload to be sent in the POST request - const payload = { - text: audio_buf_text, // Ensure 'audio_buf_text' is defined with valid data - text_language: "zh" - }; - // Call the async postData function and log the response - post_text(url, payload, send_index); - send_index = send_index + 1; - if (audio_debug) console.log(send_index, audio_buf_text); - // sleep 2 seconds - if (allow_auto_read_tts_flag) { - await delay(3000); - } - } - } - audio_push_lock.unlock(); -} - - -send_index = 0; -recv_index = 0; -to_be_processed = []; -async function UpdatePlayQueue(cnt, audio_buf_wave) { - if (cnt != recv_index) { - to_be_processed.push([cnt, audio_buf_wave]); - if (audio_debug) console.log('cache', cnt); - } - else { - if (audio_debug) console.log('processing', cnt); - recv_index = recv_index + 1; - if (audio_buf_wave) { - audioPlayer.enqueueAudio(audio_buf_wave); - } - // deal with other cached audio - while (true) { - find_any = false; - for (i = to_be_processed.length - 1; i >= 0; i--) { - if (to_be_processed[i][0] == recv_index) { - if (audio_debug) console.log('processing cached', recv_index); - if (to_be_processed[i][1]) { - audioPlayer.enqueueAudio(to_be_processed[i][1]); - } - to_be_processed.pop(i); - find_any = true; - recv_index = recv_index + 1; - } - } - if (!find_any) { break; } - } - } -} - -function post_text(url, payload, cnt) { - if (allow_auto_read_tts_flag) { - postData(url, payload, cnt) - .then(data => { - UpdatePlayQueue(cnt, data); - return; - }); - } else { - UpdatePlayQueue(cnt, null); - return; - } -} - -notify_user_error = false -// Create an async function to perform the POST request -async function postData(url = '', data = {}) { - try { - // Use the Fetch API with await - const response = await fetch(url, { - method: 'POST', // Specify the request method - body: JSON.stringify(data), // Convert the JavaScript object to a JSON string - }); - // Check if the response is ok (status in the range 200-299) - if (!response.ok) { - // If not OK, throw an error - console.info('There was a problem during audio generation requests:', response.status); - // if (!notify_user_error){ - // notify_user_error = true; - // alert('There was a problem during audio generation requests:', response.status); - // } - return null; - } - // If OK, parse and return the JSON response - return await response.arrayBuffer(); - } catch (error) { - // Log any errors that occur during the fetch operation - console.info('There was a problem during audio generation requests:', error); - // if (!notify_user_error){ - // notify_user_error = true; - // alert('There was a problem during audio generation requests:', error); - // } - return null; - } -} - async function 
generate_menu(guiBase64String, btnName){ // assign the button and menu data push_data_to_gradio_component(guiBase64String, "invisible_current_pop_up_plugin_arg", "string"); @@ -1642,37 +1284,66 @@ async function duplicate_in_new_window() { } async function run_classic_plugin_via_id(plugin_elem_id){ - // find elementid for (key in plugin_init_info_lib){ if (plugin_init_info_lib[key].elem_id == plugin_elem_id){ + // 获取按钮名称 let current_btn_name = await get_data_from_gradio_component(plugin_elem_id); - console.log(current_btn_name); - - gui_args = {} - // 关闭菜单 (如果处于开启状态) - push_data_to_gradio_component({ - visible: false, - __type__: 'update' - }, "plugin_arg_menu", "obj"); - hide_all_elem(); - // 为了与旧插件兼容,生成菜单时,自动加载旧高级参数输入区的值 - let advance_arg_input_legacy = await get_data_from_gradio_component('advance_arg_input_legacy'); - if (advance_arg_input_legacy.length != 0){ - gui_args["advanced_arg"] = {}; - gui_args["advanced_arg"].user_confirmed_value = advance_arg_input_legacy; - } - // execute the plugin - push_data_to_gradio_component(JSON.stringify(gui_args), "invisible_current_pop_up_plugin_arg_final", "string"); - push_data_to_gradio_component(current_btn_name, "invisible_callback_btn_for_plugin_exe", "string"); - document.getElementById("invisible_callback_btn_for_plugin_exe").click(); + // 执行 + call_plugin_via_name(current_btn_name); return; } } - // console.log('unable to find function'); return; } +async function call_plugin_via_name(current_btn_name) { + gui_args = {} + // 关闭菜单 (如果处于开启状态) + push_data_to_gradio_component({ + visible: false, + __type__: 'update' + }, "plugin_arg_menu", "obj"); + hide_all_elem(); + // 为了与旧插件兼容,生成菜单时,自动加载旧高级参数输入区的值 + let advance_arg_input_legacy = await get_data_from_gradio_component('advance_arg_input_legacy'); + if (advance_arg_input_legacy.length != 0){ + gui_args["advanced_arg"] = {}; + gui_args["advanced_arg"].user_confirmed_value = advance_arg_input_legacy; + } + // execute the plugin + push_data_to_gradio_component(JSON.stringify(gui_args), "invisible_current_pop_up_plugin_arg_final", "string"); + push_data_to_gradio_component(current_btn_name, "invisible_callback_btn_for_plugin_exe", "string"); + document.getElementById("invisible_callback_btn_for_plugin_exe").click(); +} + + +// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +// 多用途复用提交按钮 +// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= async function click_real_submit_btn() { document.getElementById("elem_submit").click(); -} \ No newline at end of file +} +async function multiplex_function_begin(multiplex_sel) { + if (multiplex_sel === "常规对话") { + click_real_submit_btn(); + return; + } + if (multiplex_sel === "多模型对话") { + let _align_name_in_crazy_function_py = "询问多个GPT模型"; + call_plugin_via_name(_align_name_in_crazy_function_py); + return; + } +} +async function run_multiplex_shift(multiplex_sel){ + let key = multiplex_sel; + if (multiplex_sel === "常规对话") { + key = "提交"; + } else { + key = "提交 (" + multiplex_sel + ")"; + } + push_data_to_gradio_component({ + value: key, + __type__: 'update' + }, "elem_submit_visible", "obj"); +} diff --git a/themes/common.py b/themes/common.py index efae1a75..2ea97b27 100644 --- a/themes/common.py +++ b/themes/common.py @@ -30,6 +30,7 @@ def get_common_html_javascript_code(): common_js_path_list = [ "themes/common.js", "themes/theme.js", + "themes/tts.js", "themes/init.js", ] diff --git a/themes/tts.js b/themes/tts.js new file mode 100644 index 00000000..d9ae219a --- /dev/null +++ b/themes/tts.js @@ -0,0 +1,351 @@ +// 
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +// TTS语音生成函数 +// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +audio_debug = false; +class AudioPlayer { + constructor() { + this.audioCtx = new (window.AudioContext || window.webkitAudioContext)(); + this.queue = []; + this.isPlaying = false; + this.currentSource = null; // 添加属性来保存当前播放的源 + } + + // Base64 编码的字符串转换为 ArrayBuffer + base64ToArrayBuffer(base64) { + const binaryString = window.atob(base64); + const len = binaryString.length; + const bytes = new Uint8Array(len); + for (let i = 0; i < len; i++) { + bytes[i] = binaryString.charCodeAt(i); + } + return bytes.buffer; + } + + // 检查音频播放队列并播放音频 + checkQueue() { + if (!this.isPlaying && this.queue.length > 0) { + this.isPlaying = true; + const nextAudio = this.queue.shift(); + this.play_wave(nextAudio); + } + } + + // 将音频添加到播放队列 + enqueueAudio(audio_buf_wave) { + if (allow_auto_read_tts_flag) { + this.queue.push(audio_buf_wave); + this.checkQueue(); + } + } + + // 播放音频 + async play_wave(encodedAudio) { + //const audioData = this.base64ToArrayBuffer(encodedAudio); + const audioData = encodedAudio; + try { + const buffer = await this.audioCtx.decodeAudioData(audioData); + const source = this.audioCtx.createBufferSource(); + source.buffer = buffer; + source.connect(this.audioCtx.destination); + source.onended = () => { + if (allow_auto_read_tts_flag) { + this.isPlaying = false; + this.currentSource = null; // 播放结束后清空当前源 + this.checkQueue(); + } + }; + this.currentSource = source; // 保存当前播放的源 + source.start(); + } catch (e) { + console.log("Audio error!", e); + this.isPlaying = false; + this.currentSource = null; // 出错时也应清空当前源 + this.checkQueue(); + } + } + + // 新增:立即停止播放音频的方法 + stop() { + if (this.currentSource) { + this.queue = []; // 清空队列 + this.currentSource.stop(); // 停止当前源 + this.currentSource = null; // 清空当前源 + this.isPlaying = false; // 更新播放状态 + // 关闭音频上下文可能会导致无法再次播放音频,因此仅停止当前源 + // this.audioCtx.close(); // 可选:如果需要可以关闭音频上下文 + } + } +} + +const audioPlayer = new AudioPlayer(); + +class FIFOLock { + constructor() { + this.queue = []; + this.currentTaskExecuting = false; + } + + lock() { + let resolveLock; + const lock = new Promise(resolve => { + resolveLock = resolve; + }); + + this.queue.push(resolveLock); + + if (!this.currentTaskExecuting) { + this._dequeueNext(); + } + + return lock; + } + + _dequeueNext() { + if (this.queue.length === 0) { + this.currentTaskExecuting = false; + return; + } + this.currentTaskExecuting = true; + const resolveLock = this.queue.shift(); + resolveLock(); + } + + unlock() { + this.currentTaskExecuting = false; + this._dequeueNext(); + } +} + + + + + + + + +function delay(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +// Define the trigger function with delay parameter T in milliseconds +function trigger(T, fire) { + // Variable to keep track of the timer ID + let timeoutID = null; + // Variable to store the latest arguments + let lastArgs = null; + + return function (...args) { + // Update lastArgs with the latest arguments + lastArgs = args; + // Clear the existing timer if the function is called again + if (timeoutID !== null) { + clearTimeout(timeoutID); + } + // Set a new timer that calls the `fire` function with the latest arguments after T milliseconds + timeoutID = setTimeout(() => { + fire(...lastArgs); + }, T); + }; +} + + +prev_text = ""; // previous text, this is used to check chat changes +prev_text_already_pushed = ""; // previous text already pushed to audio, this is used to check where 
we should continue to play audio +prev_chatbot_index = -1; +const delay_live_text_update = trigger(3000, on_live_stream_terminate); + +function on_live_stream_terminate(latest_text) { + // remove `prev_text_already_pushed` from `latest_text` + if (audio_debug) console.log("on_live_stream_terminate", latest_text); + remaining_text = latest_text.slice(prev_text_already_pushed.length); + if ((!isEmptyOrWhitespaceOnly(remaining_text)) && remaining_text.length != 0) { + prev_text_already_pushed = latest_text; + push_text_to_audio(remaining_text); + } +} +function is_continue_from_prev(text, prev_text) { + abl = 5 + if (text.length < prev_text.length - abl) { + return false; + } + if (prev_text.length > 10) { + return text.startsWith(prev_text.slice(0, Math.min(prev_text.length - abl, 100))); + } else { + return text.startsWith(prev_text); + } +} +function isEmptyOrWhitespaceOnly(remaining_text) { + // Replace \n and 。 with empty strings + let textWithoutSpecifiedCharacters = remaining_text.replace(/[\n。]/g, ''); + // Check if the remaining string is empty + return textWithoutSpecifiedCharacters.trim().length === 0; +} +function process_increased_text(remaining_text) { + // console.log('[is continue], remaining_text: ', remaining_text) + // remaining_text starts with \n or 。, then move these chars into prev_text_already_pushed + while (remaining_text.startsWith('\n') || remaining_text.startsWith('。')) { + prev_text_already_pushed = prev_text_already_pushed + remaining_text[0]; + remaining_text = remaining_text.slice(1); + } + if (remaining_text.includes('\n') || remaining_text.includes('。')) { // determine remaining_text contain \n or 。 + // new message begin! + index_of_last_sep = Math.max(remaining_text.lastIndexOf('\n'), remaining_text.lastIndexOf('。')); + // break the text into two parts + tobe_pushed = remaining_text.slice(0, index_of_last_sep + 1); + prev_text_already_pushed = prev_text_already_pushed + tobe_pushed; + // console.log('[is continue], push: ', tobe_pushed) + // console.log('[is continue], update prev_text_already_pushed: ', prev_text_already_pushed) + if (!isEmptyOrWhitespaceOnly(tobe_pushed)) { + // console.log('[is continue], remaining_text is empty') + push_text_to_audio(tobe_pushed); + } + } +} +function process_latest_text_output(text, chatbot_index) { + if (text.length == 0) { + prev_text = text; + prev_text_mask = text; + // console.log('empty text') + return; + } + if (text == prev_text) { + // console.log('[nothing changed]') + return; + } + + var is_continue = is_continue_from_prev(text, prev_text_already_pushed); + if (chatbot_index == prev_chatbot_index && is_continue) { + // on_text_continue_grow + remaining_text = text.slice(prev_text_already_pushed.length); + process_increased_text(remaining_text); + delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit + } + else if (chatbot_index == prev_chatbot_index && !is_continue) { + if (audio_debug) console.log('---------------------'); + if (audio_debug) console.log('text twisting!'); + if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed); + if (audio_debug) console.log('---------------------'); + prev_text_already_pushed = ""; + delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit + } + else { + // on_new_message_begin, we have to clear `prev_text_already_pushed` + if (audio_debug) console.log('---------------------'); + if (audio_debug) console.log('new message 
begin!'); + if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed); + if (audio_debug) console.log('---------------------'); + prev_text_already_pushed = ""; + process_increased_text(text); + delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit + } + prev_text = text; + prev_chatbot_index = chatbot_index; +} + +const audio_push_lock = new FIFOLock(); +async function push_text_to_audio(text) { + if (!allow_auto_read_tts_flag) { + return; + } + await audio_push_lock.lock(); + var lines = text.split(/[\n。]/); + for (const audio_buf_text of lines) { + if (audio_buf_text) { + // Append '/vits' to the current URL to form the target endpoint + const url = `${window.location.href}vits`; + // Define the payload to be sent in the POST request + const payload = { + text: audio_buf_text, // Ensure 'audio_buf_text' is defined with valid data + text_language: "zh" + }; + // Call the async postData function and log the response + post_text(url, payload, send_index); + send_index = send_index + 1; + if (audio_debug) console.log(send_index, audio_buf_text); + // sleep 2 seconds + if (allow_auto_read_tts_flag) { + await delay(3000); + } + } + } + audio_push_lock.unlock(); +} + + +send_index = 0; +recv_index = 0; +to_be_processed = []; +async function UpdatePlayQueue(cnt, audio_buf_wave) { + if (cnt != recv_index) { + to_be_processed.push([cnt, audio_buf_wave]); + if (audio_debug) console.log('cache', cnt); + } + else { + if (audio_debug) console.log('processing', cnt); + recv_index = recv_index + 1; + if (audio_buf_wave) { + audioPlayer.enqueueAudio(audio_buf_wave); + } + // deal with other cached audio + while (true) { + find_any = false; + for (i = to_be_processed.length - 1; i >= 0; i--) { + if (to_be_processed[i][0] == recv_index) { + if (audio_debug) console.log('processing cached', recv_index); + if (to_be_processed[i][1]) { + audioPlayer.enqueueAudio(to_be_processed[i][1]); + } + to_be_processed.pop(i); + find_any = true; + recv_index = recv_index + 1; + } + } + if (!find_any) { break; } + } + } +} + +function post_text(url, payload, cnt) { + if (allow_auto_read_tts_flag) { + postData(url, payload, cnt) + .then(data => { + UpdatePlayQueue(cnt, data); + return; + }); + } else { + UpdatePlayQueue(cnt, null); + return; + } +} + +notify_user_error = false +// Create an async function to perform the POST request +async function postData(url = '', data = {}) { + try { + // Use the Fetch API with await + const response = await fetch(url, { + method: 'POST', // Specify the request method + body: JSON.stringify(data), // Convert the JavaScript object to a JSON string + }); + // Check if the response is ok (status in the range 200-299) + if (!response.ok) { + // If not OK, throw an error + console.info('There was a problem during audio generation requests:', response.status); + // if (!notify_user_error){ + // notify_user_error = true; + // alert('There was a problem during audio generation requests:', response.status); + // } + return null; + } + // If OK, parse and return the JSON response + return await response.arrayBuffer(); + } catch (error) { + // Log any errors that occur during the fetch operation + console.info('There was a problem during audio generation requests:', error); + // if (!notify_user_error){ + // notify_user_error = true; + // alert('There was a problem during audio generation requests:', error); + // } + return null; + } +} \ No newline at end of file
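
Illustrative sketch (not part of the patch): one way the per-option dispatch in multiplex_function_begin() could be kept table-driven if more dropdown entries are wired up later. click_real_submit_btn() and call_plugin_via_name() are the helpers this patch adds to themes/common.js, and the plugin name "询问多个GPT模型" for the "多模型对话" option is taken from the patch itself; the commented-out entries for "智能上下文" and "智能召回 RAG" are hypothetical placeholders, since the patch does not route those selections yet.

    // Sketch only — assumes click_real_submit_btn() and call_plugin_via_name()
    // from themes/common.js (added by this patch) are in scope.
    const multiplex_dispatch_table = {
        "常规对话": () => click_real_submit_btn(),                // plain chat: forward to the hidden real submit button
        "多模型对话": () => call_plugin_via_name("询问多个GPT模型"), // plugin name as used in the patch
        // "智能上下文": () => call_plugin_via_name("<plugin name TBD>"),   // hypothetical, not wired up in this patch
        // "智能召回 RAG": () => call_plugin_via_name("<plugin name TBD>"), // hypothetical, not wired up in this patch
    };

    async function multiplex_function_begin_sketch(multiplex_sel) {
        const handler = multiplex_dispatch_table[multiplex_sel];
        if (handler) {
            handler();
        } else {
            // Selections without a handler fall back to a normal submit.
            click_real_submit_btn();
        }
    }

A table like this keeps the visible submit button's behavior and the dropdown's option list in one place, so adding a new mode only means adding one entry rather than another if/else branch.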