diff --git a/.gitignore b/.gitignore
index f8b24d75..be959f73 100644
--- a/.gitignore
+++ b/.gitignore
@@ -131,6 +131,9 @@ dmypy.json
# Pyre type checker
.pyre/
+# macOS files
+.DS_Store
+
.vscode
.idea
diff --git a/crazy_functions/Internet_GPT.py b/crazy_functions/Internet_GPT.py
index 840990fd..b43cdd48 100644
--- a/crazy_functions/Internet_GPT.py
+++ b/crazy_functions/Internet_GPT.py
@@ -3,10 +3,106 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_cl
import requests
from bs4 import BeautifulSoup
from request_llms.bridge_all import model_info
-import urllib.request
import random
from functools import lru_cache
from check_proxy import check_proxy
+from request_llms.bridge_all import predict_no_ui_long_connection
+from .prompts.Internet_GPT import Search_optimizer, Search_academic_optimizer
+import time
+import re
+import json
+from itertools import zip_longest
+
+def search_optimizer(
+ query,
+ proxies,
+ history,
+ llm_kwargs,
+ optimizer=1,
+ categories="general",
+ searxng_url=None,
+ engines=None,
+):
+ # ------------- < 第1步:尝试进行搜索优化 > -------------
+ # * 增强优化,会尝试结合历史记录进行搜索优化
+ if optimizer == 2:
+ his = " "
+ if len(history) == 0:
+ pass
+ else:
+ for i, h in enumerate(history):
+ if i % 2 == 0:
+ his += f"Q: {h}\n"
+ else:
+ his += f"A: {h}\n"
+ if categories == "general":
+ sys_prompt = Search_optimizer.format(query=query, history=his, num=4)
+ elif categories == "science":
+ sys_prompt = Search_academic_optimizer.format(query=query, history=his, num=4)
+ else:
+ his = " "
+ if categories == "general":
+ sys_prompt = Search_optimizer.format(query=query, history=his, num=3)
+ elif categories == "science":
+ sys_prompt = Search_academic_optimizer.format(query=query, history=his, num=3)
+
+    mutable = ["", time.time(), ""]
+    llm_kwargs = {**llm_kwargs, "temperature": 0.8}  # copy to avoid leaking the temperature override to the caller's later GPT calls
+ try:
+ querys_json = predict_no_ui_long_connection(
+ inputs=query,
+ llm_kwargs=llm_kwargs,
+ history=[],
+ sys_prompt=sys_prompt,
+ observe_window=mutable,
+ )
+ except Exception:
+ querys_json = "1234"
+ #* 尝试解码优化后的搜索结果
+ querys_json = re.sub(r"```json|```", "", querys_json)
+ try:
+ querys = json.loads(querys_json)
+ except Exception:
+ #* 如果解码失败,降低温度再试一次
+ try:
+ llm_kwargs["temperature"] = 0.4
+ querys_json = predict_no_ui_long_connection(
+ inputs=query,
+ llm_kwargs=llm_kwargs,
+ history=[],
+ sys_prompt=sys_prompt,
+ observe_window=mutable,
+ )
+ querys_json = re.sub(r"```json|```", "", querys_json)
+ querys = json.loads(querys_json)
+ except Exception:
+ #* 如果再次失败,直接返回原始问题
+ querys = [query]
+ links = []
+ success = 0
+ Exceptions = ""
+ for q in querys:
+ try:
+ link = searxng_request(q, proxies, categories, searxng_url, engines=engines)
+ if len(link) > 0:
+ links.append(link[:-5])
+ success += 1
+        except Exception as e:
+            Exceptions = e
+            pass
+ if success == 0:
+ raise ValueError(f"在线搜索失败!\n{Exceptions}")
+ # * 清洗搜索结果,依次放入每组第一,第二个搜索结果,并清洗重复的搜索结果
+ seen_links = set()
+ result = []
+    for group in zip_longest(*links, fillvalue=None):
+        for item in group:
+            if item is not None:
+ link = item["link"]
+ if link not in seen_links:
+ seen_links.add(link)
+ result.append(item)
+ return result
@lru_cache
def get_auth_ip():
@@ -21,8 +117,8 @@ def searxng_request(query, proxies, categories='general', searxng_url=None, engi
else:
url = searxng_url
- if engines is None:
- engines = 'bing'
+ if engines == "Mixed":
+ engines = None
if categories == 'general':
params = {
@@ -95,7 +191,7 @@ def scrape_text(url, proxies) -> str:
@CatchException
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
-
+    optimizer_history = history[-8:]
history = [] # 清空历史,以免输入溢出
chatbot.append((f"请结合互联网信息回答以下问题:{txt}", "检索中..."))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -106,16 +202,23 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
categories = plugin_kwargs.get('categories', 'general')
searxng_url = plugin_kwargs.get('searxng_url', None)
engines = plugin_kwargs.get('engine', None)
- urls = searxng_request(txt, proxies, categories, searxng_url, engines=engines)
+ optimizer = plugin_kwargs.get('optimizer', 0)
+ if optimizer == 0:
+ urls = searxng_request(txt, proxies, categories, searxng_url, engines=engines)
+ else:
+ urls = search_optimizer(txt, proxies, optimizer_history, llm_kwargs, optimizer, categories, searxng_url, engines)
history = []
if len(urls) == 0:
chatbot.append((f"结论:{txt}",
"[Local Message] 受到限制,无法从searxng获取信息!请尝试更换搜索引擎。"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
return
+
# ------------- < 第2步:依次访问网页 > -------------
max_search_result = 5 # 最多收纳多少个网页的结果
- chatbot.append([f"联网检索中 ...", None])
+ if optimizer == 2:
+ max_search_result = 8
+ chatbot.append(["联网检索中 ...", None])
for index, url in enumerate(urls[:max_search_result]):
res = scrape_text(url['link'], proxies)
prefix = f"第{index}份搜索结果 [源自{url['source'][0]}搜索] ({url['title'][:25]}):"
@@ -125,18 +228,46 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# ------------- < 第3步:ChatGPT综合 > -------------
- i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}"
- i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token
- inputs=i_say,
- history=history,
- max_token_limit=min(model_info[llm_kwargs['llm_model']]['max_token']*3//4, 8192)
- )
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
- inputs=i_say, inputs_show_user=i_say,
- llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
- sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。"
- )
- chatbot[-1] = (i_say, gpt_say)
- history.append(i_say);history.append(gpt_say)
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+ if (optimizer == 0 or optimizer == 1):
+ i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}"
+ i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token
+ inputs=i_say,
+ history=history,
+ max_token_limit=min(model_info[llm_kwargs['llm_model']]['max_token']*3//4, 8192)
+ )
+ gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+ inputs=i_say, inputs_show_user=i_say,
+ llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+ sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。"
+ )
+ chatbot[-1] = (i_say, gpt_say)
+ history.append(i_say);history.append(gpt_say)
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+ #* 或者使用搜索优化器,这样可以保证后续问答能读取到有效的历史记录
+ else:
+ i_say = f"从以上搜索结果中抽取与问题:{txt} 相关的信息:"
+ i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token
+ inputs=i_say,
+ history=history,
+ max_token_limit=min(model_info[llm_kwargs['llm_model']]['max_token']*3//4, 8192)
+ )
+ gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+ inputs=i_say, inputs_show_user=i_say,
+ llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+ sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的三个搜索结果进行总结"
+ )
+ chatbot[-1] = (i_say, gpt_say)
+ history = []
+ history.append(i_say);history.append(gpt_say)
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
+ # ------------- < 第4步:根据综合回答问题 > -------------
+ i_say = f"请根据以上搜索结果回答问题:{txt}"
+ gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+ inputs=i_say, inputs_show_user=i_say,
+ llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+ sys_prompt="请根据给定的若干条搜索结果回答问题"
+ )
+ chatbot[-1] = (i_say, gpt_say)
+ history.append(i_say);history.append(gpt_say)
+ yield from update_ui(chatbot=chatbot, history=history)
\ No newline at end of file
diff --git a/crazy_functions/Internet_GPT_Wrap.py b/crazy_functions/Internet_GPT_Wrap.py
index 8d3aa43f..dbc13af9 100644
--- a/crazy_functions/Internet_GPT_Wrap.py
+++ b/crazy_functions/Internet_GPT_Wrap.py
@@ -22,11 +22,13 @@ class NetworkGPT_Wrap(GptAcademicPluginTemplate):
"""
gui_definition = {
"main_input":
- ArgProperty(title="输入问题", description="待通过互联网检索的问题", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
+ ArgProperty(title="输入问题", description="待通过互联网检索的问题,会自动读取输入框内容", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
"categories":
ArgProperty(title="搜索分类", options=["网页", "学术论文"], default_value="网页", description="无", type="dropdown").model_dump_json(),
"engine":
- ArgProperty(title="选择搜索引擎", options=["bing", "google", "duckduckgo"], default_value="bing", description="无", type="dropdown").model_dump_json(),
+ ArgProperty(title="选择搜索引擎", options=["Mixed", "bing", "google", "duckduckgo"], default_value="Mixed", description="无", type="dropdown").model_dump_json(),
+ "optimizer":
+ ArgProperty(title="搜索优化", options=["关闭", "开启", "开启(增强)"], default_value="关闭", description="是否使用搜索增强。注意这可能会消耗较多token", type="dropdown").model_dump_json(),
"searxng_url":
ArgProperty(title="Searxng服务地址", description="输入Searxng的地址", default_value=get_conf("SEARXNG_URL"), type="string").model_dump_json(), # 主输入,自动从输入框同步
@@ -39,6 +41,7 @@ class NetworkGPT_Wrap(GptAcademicPluginTemplate):
"""
if plugin_kwargs["categories"] == "网页": plugin_kwargs["categories"] = "general"
if plugin_kwargs["categories"] == "学术论文": plugin_kwargs["categories"] = "science"
-
+ optimizer_options=["关闭", "开启", "开启(增强)"]
+ plugin_kwargs["optimizer"] = optimizer_options.index(plugin_kwargs["optimizer"])
yield from 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
diff --git a/crazy_functions/prompts/Internet_GPT.py b/crazy_functions/prompts/Internet_GPT.py
new file mode 100644
index 00000000..a623808d
--- /dev/null
+++ b/crazy_functions/prompts/Internet_GPT.py
@@ -0,0 +1,87 @@
+Search_optimizer="""作为一个网页搜索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成多个不同版本的“检索词”,从而提高网页检索的精度。生成的问题要求指向对象清晰明确,并与“原问题语言相同”。例如:
+历史记录:
+"
+Q: 对话背景。
+A: 当前对话是关于 Nginx 的介绍和在Ubuntu上的使用等。
+"
+原问题: 怎么下载
+检索词: ["Nginx 下载","Ubuntu Nginx","Ubuntu安装Nginx"]
+----------------
+历史记录:
+"
+Q: 对话背景。
+A: 当前对话是关于 Nginx 的介绍和使用等。
+Q: 报错 "no connection"
+A: 报错"no connection"可能是因为……
+"
+原问题: 怎么解决
+检索词: ["Nginx报错"no connection" 解决","Nginx'no connection'报错 原因","Nginx提示'no connection'"]
+----------------
+历史记录:
+"
+
+"
+原问题: 你知道 Python 么?
+检索词: ["Python","Python 使用教程。","Python 特点和优势"]
+----------------
+历史记录:
+"
+Q: 列出Java的三种特点?
+A: 1. Java 是一种编译型语言。
+ 2. Java 是一种面向对象的编程语言。
+ 3. Java 是一种跨平台的编程语言。
+"
+原问题: 介绍下第2点。
+检索词: ["Java 面向对象特点","Java 面向对象编程优势。","Java 面向对象编程"]
+----------------
+现在有历史记录:
+"
+{history}
+"
+有其原问题: {query}
+直接给出最多{num}个检索词,必须以json形式给出,不得有多余字符:
+"""
+
+Search_academic_optimizer="""作为一个学术论文搜索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成多个不同版本的“检索词”,从而提高学术论文检索的精度。生成的问题要求指向对象清晰明确,并与“原问题语言相同”。例如:
+历史记录:
+"
+Q: 对话背景。
+A: 当前对话是关于深度学习的介绍和在图像识别中的应用等。
+"
+原问题: 怎么下载相关论文
+检索词: ["深度学习 图像识别 论文下载","图像识别 深度学习 研究论文","深度学习 图像识别 论文资源","Deep Learning Image Recognition Paper Download","Image Recognition Deep Learning Research Paper"]
+----------------
+历史记录:
+"
+Q: 对话背景。
+A: 当前对话是关于深度学习的介绍和应用等。
+Q: 报错 "模型不收敛"
+A: 报错"模型不收敛"可能是因为……
+"
+原问题: 怎么解决
+检索词: ["深度学习 模型不收敛 解决方案 论文","深度学习 模型不收敛 原因 研究","深度学习 模型不收敛 论文","Deep Learning Model Convergence Issue Solution Paper","Deep Learning Model Convergence Problem Research"]
+----------------
+历史记录:
+"
+
+"
+原问题: 你知道 GAN 么?
+检索词: ["生成对抗网络 论文","GAN 使用教程 论文","GAN 特点和优势 研究","Generative Adversarial Network Paper","GAN Usage Tutorial Paper"]
+----------------
+历史记录:
+"
+Q: 列出机器学习的三种应用?
+A: 1. 机器学习在图像识别中的应用。
+ 2. 机器学习在自然语言处理中的应用。
+ 3. 机器学习在推荐系统中的应用。
+"
+原问题: 介绍下第2点。
+检索词: ["机器学习 自然语言处理 应用 论文","机器学习 自然语言处理 研究","机器学习 NLP 应用 论文","Machine Learning Natural Language Processing Application Paper","Machine Learning NLP Research"]
+----------------
+现在有历史记录:
+"
+{history}
+"
+有其原问题: {query}
+直接给出最多{num}个检索词,必须以json形式给出,不得有多余字符:
+"""
\ No newline at end of file
diff --git a/main.py b/main.py
index 0ad79b78..8de1cc2c 100644
--- a/main.py
+++ b/main.py
@@ -112,8 +112,18 @@ def main():
with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
with gr.Row():
txt = gr.Textbox(show_label=False, placeholder="Input question here.", elem_id='user_input_main').style(container=False)
- with gr.Row():
- submitBtn = gr.Button("提交", elem_id="elem_submit", variant="primary")
+ with gr.Row(elem_id="gpt-submit-row"):
+ multiplex_submit_btn = gr.Button("提交", elem_id="elem_submit_visible", variant="primary")
+ multiplex_sel = gr.Dropdown(
+ choices=[
+ "常规对话",
+ "多模型对话",
+ # "智能上下文",
+ # "智能召回 RAG",
+ ], value="常规对话",
+ interactive=True, label='', show_label=False,
+ elem_classes='normal_mut_select', elem_id="gpt-submit-dropdown").style(container=False)
+ submit_btn = gr.Button("提交", elem_id="elem_submit", variant="primary", visible=False)
with gr.Row():
resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm")
stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm")
@@ -160,7 +170,7 @@ def main():
if not plugin.get("AsButton", True): dropdown_fn_list.append(k) # 排除已经是按钮的插件
elif plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
with gr.Row():
- dropdown = gr.Dropdown(dropdown_fn_list, value=r"点击这里搜索插件列表", label="", show_label=False).style(container=False)
+ dropdown = gr.Dropdown(dropdown_fn_list, value=r"点击这里输入「关键词」搜索插件", label="", show_label=False).style(container=False)
with gr.Row():
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False, elem_id="advance_arg_input_legacy",
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
@@ -177,7 +187,7 @@ def main():
# 浮动菜单定义
from themes.gui_floating_menu import define_gui_floating_menu
- area_input_secondary, txt2, area_customize, submitBtn2, resetBtn2, clearBtn2, stopBtn2 = \
+ area_input_secondary, txt2, area_customize, _, resetBtn2, clearBtn2, stopBtn2 = \
define_gui_floating_menu(customize_btns, functional, predefined_btns, cookies, web_cookie_cache)
# 插件二级菜单的实现
@@ -209,11 +219,15 @@ def main():
input_combo_order = ["cookies", "max_length_sl", "md_dropdown", "txt", "txt2", "top_p", "temperature", "chatbot", "history", "system_prompt", "plugin_advanced_arg"]
output_combo = [cookies, chatbot, history, status]
predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True)], outputs=output_combo)
+
# 提交按钮、重置按钮
- cancel_handles.append(txt.submit(**predict_args))
- cancel_handles.append(txt2.submit(**predict_args))
- cancel_handles.append(submitBtn.click(**predict_args))
- cancel_handles.append(submitBtn2.click(**predict_args))
+ multiplex_submit_btn.click(
+ None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""")
+ txt.submit(
+ None, [multiplex_sel], None, _js="""(multiplex_sel)=>multiplex_function_begin(multiplex_sel)""")
+ multiplex_sel.select(
+        None, [multiplex_sel], None, _js="""(multiplex_sel)=>run_multiplex_shift(multiplex_sel)""")
+ cancel_handles.append(submit_btn.click(**predict_args))
resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
reset_server_side_args = (lambda history: ([], [], "已重置", json.dumps(history)), [history], [chatbot, history, status, history_cache])
@@ -222,10 +236,7 @@ def main():
clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
if AUTO_CLEAR_TXT:
- submitBtn.click(None, None, [txt, txt2], _js=js_code_clear)
- submitBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
- txt.submit(None, None, [txt, txt2], _js=js_code_clear)
- txt2.submit(None, None, [txt, txt2], _js=js_code_clear)
+ submit_btn.click(None, None, [txt, txt2], _js=js_code_clear)
# 基础功能区的回调函数注册
for k in functional:
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
@@ -238,7 +249,6 @@ def main():
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
# 函数插件-固定按钮区
-
for k in plugins:
register_advanced_plugin_init_arr += f"""register_plugin_init("{k}","{encode_plugin_info(k, plugins[k])}");"""
if plugins[k].get("Class", None):
diff --git a/themes/common.css b/themes/common.css
index b2c9b8cd..eb929cff 100644
--- a/themes/common.css
+++ b/themes/common.css
@@ -142,3 +142,132 @@
border-top-width: 0;
}
+
+.welcome-card-container {
+ text-align: center;
+ margin: 0 auto;
+ display: flex;
+ position: absolute;
+ width: inherit;
+ padding: 50px;
+ top: 50%;
+ left: 50%;
+ transform: translate(-50%, -50%);
+ flex-wrap: wrap;
+ justify-content: center;
+ transition: opacity 1s ease-in-out;
+ opacity: 0;
+}
+.welcome-card-container.show {
+ opacity: 1;
+}
+.welcome-card-container.hide {
+ opacity: 0;
+}
+.welcome-card {
+ border-radius: 10px;
+ box-shadow: 0px 0px 6px 3px #e5e7eb6b;
+ padding: 15px;
+ margin: 10px;
+ flex: 1 0 calc(30% - 5px);
+ transform: rotateY(0deg);
+ transition: transform 0.1s;
+ transform-style: preserve-3d;
+}
+.welcome-card.show {
+ transform: rotateY(0deg);
+}
+.welcome-card.hide {
+ transform: rotateY(90deg);
+}
+.welcome-title {
+ font-size: 40px;
+ padding: 20px;
+ margin: 10px;
+ flex: 0 0 calc(90%);
+}
+.welcome-card-title {
+ font-size: 20px;
+ margin: 2px;
+ flex: 0 0 calc(95%);
+ padding-bottom: 8px;
+ padding-top: 8px;
+ padding-right: 8px;
+ padding-left: 8px;
+ display: flex;
+ justify-content: center;
+}
+.welcome-svg {
+ padding-right: 10px;
+}
+
+.welcome-title-text {
+ text-wrap: nowrap;
+}
+.welcome-content {
+ text-wrap: balance;
+ height: 55px;
+ display: flex;
+ align-items: center;
+}
+
+
+#gpt-submit-row {
+ display: flex;
+ gap: 0 !important;
+ border-radius: var(--button-large-radius);
+ border: var(--button-border-width) solid var(--button-primary-border-color);
+ /* background: var(--button-primary-background-fill); */
+ background: var(--button-primary-background-fill-hover);
+ color: var(--button-primary-text-color);
+ box-shadow: var(--button-shadow);
+ transition: var(--button-transition);
+ display: flex;
+}
+#gpt-submit-row:hover {
+ border-color: var(--button-primary-border-color-hover);
+ /* background: var(--button-primary-background-fill-hover); */
+ /* color: var(--button-primary-text-color-hover); */
+}
+#gpt-submit-row button#elem_submit_visible {
+ border-top-right-radius: 0px;
+ border-bottom-right-radius: 0px;
+ box-shadow: none !important;
+ flex-grow: 1;
+}
+#gpt-submit-row #gpt-submit-dropdown {
+ border-top-left-radius: 0px;
+ border-bottom-left-radius: 0px;
+ border-left: 0.5px solid #FFFFFF88 !important;
+ display: flex;
+ overflow: unset !important;
+ max-width: 40px !important;
+ min-width: 40px !important;
+}
+#gpt-submit-row #gpt-submit-dropdown input {
+ pointer-events: none;
+ opacity: 0; /* 隐藏输入框 */
+ width: 0;
+ margin-inline: 0;
+ cursor: pointer;
+}
+#gpt-submit-row #gpt-submit-dropdown label {
+ display: flex;
+ width: 0;
+}
+#gpt-submit-row #gpt-submit-dropdown label div.wrap {
+ background: none;
+ box-shadow: none;
+ border: none;
+}
+#gpt-submit-row #gpt-submit-dropdown label div.wrap div.wrap-inner {
+ background: none;
+ padding-inline: 0;
+ height: 100%;
+}
+#gpt-submit-row #gpt-submit-dropdown svg.dropdown-arrow {
+ transform: scale(2) translate(4.5px, -0.3px);
+}
+#gpt-submit-row #gpt-submit-dropdown > *:hover {
+ cursor: context-menu;
+}
\ No newline at end of file
diff --git a/themes/common.js b/themes/common.js
index 34351f08..6b631ab4 100644
--- a/themes/common.js
+++ b/themes/common.js
@@ -796,6 +796,26 @@ function minor_ui_adjustment() {
}, 200); // 每50毫秒执行一次
}
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+// 对提交按钮的下拉选框做的变化
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+function ButtonWithDropdown_init() {
+ let submitButton = document.querySelector('button#elem_submit_visible');
+ let submitDropdown = document.querySelector('#gpt-submit-dropdown');
+ function updateDropdownWidth() {
+ if (submitButton) {
+ let setWidth = submitButton.clientWidth + submitDropdown.clientWidth;
+ let setLeft = -1 * submitButton.clientWidth;
+ document.getElementById('submit-dropdown-style')?.remove();
+ const styleElement = document.createElement('style');
+ styleElement.id = 'submit-dropdown-style';
+ styleElement.innerHTML = `#gpt-submit-dropdown ul.options { width: ${setWidth}px; left: ${setLeft}px; }`;
+ document.head.appendChild(styleElement);
+ }
+ }
+ window.addEventListener('resize', updateDropdownWidth);
+ updateDropdownWidth();
+}
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// 第 6 部分: 避免滑动
@@ -1050,364 +1070,6 @@ async function on_plugin_exe_complete(fn_name) {
}
-
-
-
-
-
-
-// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-// 第 8 部分: TTS语音生成函数
-// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-audio_debug = false;
-class AudioPlayer {
- constructor() {
- this.audioCtx = new (window.AudioContext || window.webkitAudioContext)();
- this.queue = [];
- this.isPlaying = false;
- this.currentSource = null; // 添加属性来保存当前播放的源
- }
-
- // Base64 编码的字符串转换为 ArrayBuffer
- base64ToArrayBuffer(base64) {
- const binaryString = window.atob(base64);
- const len = binaryString.length;
- const bytes = new Uint8Array(len);
- for (let i = 0; i < len; i++) {
- bytes[i] = binaryString.charCodeAt(i);
- }
- return bytes.buffer;
- }
-
- // 检查音频播放队列并播放音频
- checkQueue() {
- if (!this.isPlaying && this.queue.length > 0) {
- this.isPlaying = true;
- const nextAudio = this.queue.shift();
- this.play_wave(nextAudio);
- }
- }
-
- // 将音频添加到播放队列
- enqueueAudio(audio_buf_wave) {
- if (allow_auto_read_tts_flag) {
- this.queue.push(audio_buf_wave);
- this.checkQueue();
- }
- }
-
- // 播放音频
- async play_wave(encodedAudio) {
- //const audioData = this.base64ToArrayBuffer(encodedAudio);
- const audioData = encodedAudio;
- try {
- const buffer = await this.audioCtx.decodeAudioData(audioData);
- const source = this.audioCtx.createBufferSource();
- source.buffer = buffer;
- source.connect(this.audioCtx.destination);
- source.onended = () => {
- if (allow_auto_read_tts_flag) {
- this.isPlaying = false;
- this.currentSource = null; // 播放结束后清空当前源
- this.checkQueue();
- }
- };
- this.currentSource = source; // 保存当前播放的源
- source.start();
- } catch (e) {
- console.log("Audio error!", e);
- this.isPlaying = false;
- this.currentSource = null; // 出错时也应清空当前源
- this.checkQueue();
- }
- }
-
- // 新增:立即停止播放音频的方法
- stop() {
- if (this.currentSource) {
- this.queue = []; // 清空队列
- this.currentSource.stop(); // 停止当前源
- this.currentSource = null; // 清空当前源
- this.isPlaying = false; // 更新播放状态
- // 关闭音频上下文可能会导致无法再次播放音频,因此仅停止当前源
- // this.audioCtx.close(); // 可选:如果需要可以关闭音频上下文
- }
- }
-}
-
-const audioPlayer = new AudioPlayer();
-
-class FIFOLock {
- constructor() {
- this.queue = [];
- this.currentTaskExecuting = false;
- }
-
- lock() {
- let resolveLock;
- const lock = new Promise(resolve => {
- resolveLock = resolve;
- });
-
- this.queue.push(resolveLock);
-
- if (!this.currentTaskExecuting) {
- this._dequeueNext();
- }
-
- return lock;
- }
-
- _dequeueNext() {
- if (this.queue.length === 0) {
- this.currentTaskExecuting = false;
- return;
- }
- this.currentTaskExecuting = true;
- const resolveLock = this.queue.shift();
- resolveLock();
- }
-
- unlock() {
- this.currentTaskExecuting = false;
- this._dequeueNext();
- }
-}
-
-
-
-
-
-
-
-
-function delay(ms) {
- return new Promise(resolve => setTimeout(resolve, ms));
-}
-
-// Define the trigger function with delay parameter T in milliseconds
-function trigger(T, fire) {
- // Variable to keep track of the timer ID
- let timeoutID = null;
- // Variable to store the latest arguments
- let lastArgs = null;
-
- return function (...args) {
- // Update lastArgs with the latest arguments
- lastArgs = args;
- // Clear the existing timer if the function is called again
- if (timeoutID !== null) {
- clearTimeout(timeoutID);
- }
- // Set a new timer that calls the `fire` function with the latest arguments after T milliseconds
- timeoutID = setTimeout(() => {
- fire(...lastArgs);
- }, T);
- };
-}
-
-
-prev_text = ""; // previous text, this is used to check chat changes
-prev_text_already_pushed = ""; // previous text already pushed to audio, this is used to check where we should continue to play audio
-prev_chatbot_index = -1;
-const delay_live_text_update = trigger(3000, on_live_stream_terminate);
-
-function on_live_stream_terminate(latest_text) {
- // remove `prev_text_already_pushed` from `latest_text`
- if (audio_debug) console.log("on_live_stream_terminate", latest_text);
- remaining_text = latest_text.slice(prev_text_already_pushed.length);
- if ((!isEmptyOrWhitespaceOnly(remaining_text)) && remaining_text.length != 0) {
- prev_text_already_pushed = latest_text;
- push_text_to_audio(remaining_text);
- }
-}
-function is_continue_from_prev(text, prev_text) {
- abl = 5
- if (text.length < prev_text.length - abl) {
- return false;
- }
- if (prev_text.length > 10) {
- return text.startsWith(prev_text.slice(0, Math.min(prev_text.length - abl, 100)));
- } else {
- return text.startsWith(prev_text);
- }
-}
-function isEmptyOrWhitespaceOnly(remaining_text) {
- // Replace \n and 。 with empty strings
- let textWithoutSpecifiedCharacters = remaining_text.replace(/[\n。]/g, '');
- // Check if the remaining string is empty
- return textWithoutSpecifiedCharacters.trim().length === 0;
-}
-function process_increased_text(remaining_text) {
- // console.log('[is continue], remaining_text: ', remaining_text)
- // remaining_text starts with \n or 。, then move these chars into prev_text_already_pushed
- while (remaining_text.startsWith('\n') || remaining_text.startsWith('。')) {
- prev_text_already_pushed = prev_text_already_pushed + remaining_text[0];
- remaining_text = remaining_text.slice(1);
- }
- if (remaining_text.includes('\n') || remaining_text.includes('。')) { // determine remaining_text contain \n or 。
- // new message begin!
- index_of_last_sep = Math.max(remaining_text.lastIndexOf('\n'), remaining_text.lastIndexOf('。'));
- // break the text into two parts
- tobe_pushed = remaining_text.slice(0, index_of_last_sep + 1);
- prev_text_already_pushed = prev_text_already_pushed + tobe_pushed;
- // console.log('[is continue], push: ', tobe_pushed)
- // console.log('[is continue], update prev_text_already_pushed: ', prev_text_already_pushed)
- if (!isEmptyOrWhitespaceOnly(tobe_pushed)) {
- // console.log('[is continue], remaining_text is empty')
- push_text_to_audio(tobe_pushed);
- }
- }
-}
-function process_latest_text_output(text, chatbot_index) {
- if (text.length == 0) {
- prev_text = text;
- prev_text_mask = text;
- // console.log('empty text')
- return;
- }
- if (text == prev_text) {
- // console.log('[nothing changed]')
- return;
- }
-
- var is_continue = is_continue_from_prev(text, prev_text_already_pushed);
- if (chatbot_index == prev_chatbot_index && is_continue) {
- // on_text_continue_grow
- remaining_text = text.slice(prev_text_already_pushed.length);
- process_increased_text(remaining_text);
- delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
- }
- else if (chatbot_index == prev_chatbot_index && !is_continue) {
- if (audio_debug) console.log('---------------------');
- if (audio_debug) console.log('text twisting!');
- if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
- if (audio_debug) console.log('---------------------');
- prev_text_already_pushed = "";
- delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
- }
- else {
- // on_new_message_begin, we have to clear `prev_text_already_pushed`
- if (audio_debug) console.log('---------------------');
- if (audio_debug) console.log('new message begin!');
- if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
- if (audio_debug) console.log('---------------------');
- prev_text_already_pushed = "";
- process_increased_text(text);
- delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
- }
- prev_text = text;
- prev_chatbot_index = chatbot_index;
-}
-
-const audio_push_lock = new FIFOLock();
-async function push_text_to_audio(text) {
- if (!allow_auto_read_tts_flag) {
- return;
- }
- await audio_push_lock.lock();
- var lines = text.split(/[\n。]/);
- for (const audio_buf_text of lines) {
- if (audio_buf_text) {
- // Append '/vits' to the current URL to form the target endpoint
- const url = `${window.location.href}vits`;
- // Define the payload to be sent in the POST request
- const payload = {
- text: audio_buf_text, // Ensure 'audio_buf_text' is defined with valid data
- text_language: "zh"
- };
- // Call the async postData function and log the response
- post_text(url, payload, send_index);
- send_index = send_index + 1;
- if (audio_debug) console.log(send_index, audio_buf_text);
- // sleep 2 seconds
- if (allow_auto_read_tts_flag) {
- await delay(3000);
- }
- }
- }
- audio_push_lock.unlock();
-}
-
-
-send_index = 0;
-recv_index = 0;
-to_be_processed = [];
-async function UpdatePlayQueue(cnt, audio_buf_wave) {
- if (cnt != recv_index) {
- to_be_processed.push([cnt, audio_buf_wave]);
- if (audio_debug) console.log('cache', cnt);
- }
- else {
- if (audio_debug) console.log('processing', cnt);
- recv_index = recv_index + 1;
- if (audio_buf_wave) {
- audioPlayer.enqueueAudio(audio_buf_wave);
- }
- // deal with other cached audio
- while (true) {
- find_any = false;
- for (i = to_be_processed.length - 1; i >= 0; i--) {
- if (to_be_processed[i][0] == recv_index) {
- if (audio_debug) console.log('processing cached', recv_index);
- if (to_be_processed[i][1]) {
- audioPlayer.enqueueAudio(to_be_processed[i][1]);
- }
- to_be_processed.pop(i);
- find_any = true;
- recv_index = recv_index + 1;
- }
- }
- if (!find_any) { break; }
- }
- }
-}
-
-function post_text(url, payload, cnt) {
- if (allow_auto_read_tts_flag) {
- postData(url, payload, cnt)
- .then(data => {
- UpdatePlayQueue(cnt, data);
- return;
- });
- } else {
- UpdatePlayQueue(cnt, null);
- return;
- }
-}
-
-notify_user_error = false
-// Create an async function to perform the POST request
-async function postData(url = '', data = {}) {
- try {
- // Use the Fetch API with await
- const response = await fetch(url, {
- method: 'POST', // Specify the request method
- body: JSON.stringify(data), // Convert the JavaScript object to a JSON string
- });
- // Check if the response is ok (status in the range 200-299)
- if (!response.ok) {
- // If not OK, throw an error
- console.info('There was a problem during audio generation requests:', response.status);
- // if (!notify_user_error){
- // notify_user_error = true;
- // alert('There was a problem during audio generation requests:', response.status);
- // }
- return null;
- }
- // If OK, parse and return the JSON response
- return await response.arrayBuffer();
- } catch (error) {
- // Log any errors that occur during the fetch operation
- console.info('There was a problem during audio generation requests:', error);
- // if (!notify_user_error){
- // notify_user_error = true;
- // alert('There was a problem during audio generation requests:', error);
- // }
- return null;
- }
-}
-
async function generate_menu(guiBase64String, btnName){
// assign the button and menu data
push_data_to_gradio_component(guiBase64String, "invisible_current_pop_up_plugin_arg", "string");
@@ -1642,32 +1304,66 @@ async function duplicate_in_new_window() {
}
async function run_classic_plugin_via_id(plugin_elem_id){
- // find elementid
for (key in plugin_init_info_lib){
if (plugin_init_info_lib[key].elem_id == plugin_elem_id){
+ // 获取按钮名称
let current_btn_name = await get_data_from_gradio_component(plugin_elem_id);
- console.log(current_btn_name);
-
- gui_args = {}
- // 关闭菜单 (如果处于开启状态)
- push_data_to_gradio_component({
- visible: false,
- __type__: 'update'
- }, "plugin_arg_menu", "obj");
- hide_all_elem();
- // 为了与旧插件兼容,生成菜单时,自动加载旧高级参数输入区的值
- let advance_arg_input_legacy = await get_data_from_gradio_component('advance_arg_input_legacy');
- if (advance_arg_input_legacy.length != 0){
- gui_args["advanced_arg"] = {};
- gui_args["advanced_arg"].user_confirmed_value = advance_arg_input_legacy;
- }
- // execute the plugin
- push_data_to_gradio_component(JSON.stringify(gui_args), "invisible_current_pop_up_plugin_arg_final", "string");
- push_data_to_gradio_component(current_btn_name, "invisible_callback_btn_for_plugin_exe", "string");
- document.getElementById("invisible_callback_btn_for_plugin_exe").click();
+ // 执行
+ call_plugin_via_name(current_btn_name);
return;
}
}
- // console.log('unable to find function');
return;
}
+
+async function call_plugin_via_name(current_btn_name) {
+ gui_args = {}
+ // 关闭菜单 (如果处于开启状态)
+ push_data_to_gradio_component({
+ visible: false,
+ __type__: 'update'
+ }, "plugin_arg_menu", "obj");
+ hide_all_elem();
+ // 为了与旧插件兼容,生成菜单时,自动加载旧高级参数输入区的值
+ let advance_arg_input_legacy = await get_data_from_gradio_component('advance_arg_input_legacy');
+ if (advance_arg_input_legacy.length != 0){
+ gui_args["advanced_arg"] = {};
+ gui_args["advanced_arg"].user_confirmed_value = advance_arg_input_legacy;
+ }
+ // execute the plugin
+ push_data_to_gradio_component(JSON.stringify(gui_args), "invisible_current_pop_up_plugin_arg_final", "string");
+ push_data_to_gradio_component(current_btn_name, "invisible_callback_btn_for_plugin_exe", "string");
+ document.getElementById("invisible_callback_btn_for_plugin_exe").click();
+}
+
+
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+// 多用途复用提交按钮
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+
+async function click_real_submit_btn() {
+ document.getElementById("elem_submit").click();
+}
+async function multiplex_function_begin(multiplex_sel) {
+ if (multiplex_sel === "常规对话") {
+ click_real_submit_btn();
+ return;
+ }
+ if (multiplex_sel === "多模型对话") {
+ let _align_name_in_crazy_function_py = "询问多个GPT模型";
+ call_plugin_via_name(_align_name_in_crazy_function_py);
+ return;
+ }
+}
+async function run_multiplex_shift(multiplex_sel){
+ let key = multiplex_sel;
+ if (multiplex_sel === "常规对话") {
+ key = "提交";
+ } else {
+ key = "提交 (" + multiplex_sel + ")";
+ }
+ push_data_to_gradio_component({
+ value: key,
+ __type__: 'update'
+ }, "elem_submit_visible", "obj");
+}
diff --git a/themes/common.py b/themes/common.py
index efae1a75..0b7b5bcc 100644
--- a/themes/common.py
+++ b/themes/common.py
@@ -30,7 +30,9 @@ def get_common_html_javascript_code():
common_js_path_list = [
"themes/common.js",
"themes/theme.js",
+ "themes/tts.js",
"themes/init.js",
+ "themes/welcome.js",
]
if ADD_WAIFU: # 添加Live2D
diff --git a/themes/green.css b/themes/green.css
index 85dd8013..1235b800 100644
--- a/themes/green.css
+++ b/themes/green.css
@@ -1,7 +1,7 @@
:root {
--chatbot-color-light: #000000;
--chatbot-color-dark: #FFFFFF;
- --chatbot-background-color-light: #F3F3F3;
+ --chatbot-background-color-light: #FFFFFF;
--chatbot-background-color-dark: #121111;
--message-user-background-color-light: #95EC69;
--message-user-background-color-dark: #26B561;
@@ -196,7 +196,7 @@ footer {
transition: opacity 0.3s ease-in-out;
}
textarea.svelte-1pie7s6 {
- background: #e7e6e6 !important;
+ background: #f1f1f1 !important;
width: 100% !important;
}
diff --git a/themes/green.py b/themes/green.py
index b16249a8..bd1179a3 100644
--- a/themes/green.py
+++ b/themes/green.py
@@ -62,11 +62,11 @@ def adjust_theme():
button_primary_text_color="white",
button_primary_text_color_dark="white",
button_secondary_background_fill="*neutral_100",
- button_secondary_background_fill_hover="*neutral_50",
+ button_secondary_background_fill_hover="#FEFEFE",
button_secondary_background_fill_dark="*neutral_900",
button_secondary_text_color="*neutral_800",
button_secondary_text_color_dark="white",
- background_fill_primary="*neutral_50",
+ background_fill_primary="#FEFEFE",
background_fill_primary_dark="#1F1F1F",
block_title_text_color="*primary_500",
block_title_background_fill_dark="*primary_900",
diff --git a/themes/gui_floating_menu.py b/themes/gui_floating_menu.py
index 003141f3..90947c5d 100644
--- a/themes/gui_floating_menu.py
+++ b/themes/gui_floating_menu.py
@@ -8,8 +8,10 @@ def define_gui_floating_menu(customize_btns, functional, predefined_btns, cookie
with gr.Column(scale=10):
txt2 = gr.Textbox(show_label=False, placeholder="Input question here.",
elem_id='user_input_float', lines=8, label="输入区2").style(container=False)
+ txt2.submit(None, None, None, _js="""click_real_submit_btn""")
with gr.Column(scale=1, min_width=40):
submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
+ submitBtn2.click(None, None, None, _js="""click_real_submit_btn""")
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
clearBtn2 = gr.Button("清除", elem_id="elem_clear2", variant="secondary", visible=False); clearBtn2.style(size="sm")
diff --git a/themes/init.js b/themes/init.js
index 0f5711bb..9ea8dd5a 100644
--- a/themes/init.js
+++ b/themes/init.js
@@ -2,11 +2,18 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
// 第一部分,布局初始化
audio_fn_init();
minor_ui_adjustment();
+ ButtonWithDropdown_init();
+
+ // 加载欢迎页面
+ const welcomeMessage = new WelcomeMessage();
+ welcomeMessage.begin_render();
chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
var chatbotObserver = new MutationObserver(() => {
chatbotContentChanged(1);
+ welcomeMessage.update();
});
chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
+
if (layout === "LEFT-RIGHT") { chatbotAutoHeight(); }
if (layout === "LEFT-RIGHT") { limit_scroll_position(); }
@@ -122,4 +129,5 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
// 主题加载(恢复到上次)
change_theme("", "")
+
}
diff --git a/themes/mermaid.min.js b/themes/mermaid.min.js
deleted file mode 100644
index b842822b..00000000
--- a/themes/mermaid.min.js
+++ /dev/null
@@ -1 +0,0 @@
-// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
diff --git a/themes/mermaid_editor.js b/themes/mermaid_editor.js
deleted file mode 100644
index b842822b..00000000
--- a/themes/mermaid_editor.js
+++ /dev/null
@@ -1 +0,0 @@
-// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
diff --git a/themes/mermaid_loader.js b/themes/mermaid_loader.js
deleted file mode 100644
index b842822b..00000000
--- a/themes/mermaid_loader.js
+++ /dev/null
@@ -1 +0,0 @@
-// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
diff --git a/themes/pako.esm.mjs b/themes/pako.esm.mjs
deleted file mode 100644
index b842822b..00000000
--- a/themes/pako.esm.mjs
+++ /dev/null
@@ -1 +0,0 @@
-// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
diff --git a/themes/sovits_audio.js b/themes/sovits_audio.js
deleted file mode 100644
index e69de29b..00000000
diff --git a/themes/svg/arxiv.svg b/themes/svg/arxiv.svg
new file mode 100644
index 00000000..d4adb4cf
--- /dev/null
+++ b/themes/svg/arxiv.svg
@@ -0,0 +1,7 @@
+
+
\ No newline at end of file
diff --git a/themes/svg/brain.svg b/themes/svg/brain.svg
new file mode 100644
index 00000000..82e38d2e
--- /dev/null
+++ b/themes/svg/brain.svg
@@ -0,0 +1,8 @@
+
+
\ No newline at end of file
diff --git a/themes/svg/conf.svg b/themes/svg/conf.svg
new file mode 100644
index 00000000..257fabd2
--- /dev/null
+++ b/themes/svg/conf.svg
@@ -0,0 +1,8 @@
+
+
\ No newline at end of file
diff --git a/themes/svg/default.svg b/themes/svg/default.svg
new file mode 100644
index 00000000..990eda38
--- /dev/null
+++ b/themes/svg/default.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/themes/svg/doc.svg b/themes/svg/doc.svg
new file mode 100644
index 00000000..d28ab5c1
--- /dev/null
+++ b/themes/svg/doc.svg
@@ -0,0 +1,9 @@
+
+
\ No newline at end of file
diff --git a/themes/svg/img.svg b/themes/svg/img.svg
new file mode 100644
index 00000000..9864a3e9
--- /dev/null
+++ b/themes/svg/img.svg
@@ -0,0 +1,16 @@
+
+
\ No newline at end of file
diff --git a/themes/svg/mm.svg b/themes/svg/mm.svg
new file mode 100644
index 00000000..66a9332c
--- /dev/null
+++ b/themes/svg/mm.svg
@@ -0,0 +1,10 @@
+
+
\ No newline at end of file
diff --git a/themes/svg/polish.svg b/themes/svg/polish.svg
new file mode 100644
index 00000000..41bfe03f
--- /dev/null
+++ b/themes/svg/polish.svg
@@ -0,0 +1,7 @@
+
+
\ No newline at end of file
diff --git a/themes/svg/tts.svg b/themes/svg/tts.svg
new file mode 100644
index 00000000..dcc0bd3a
--- /dev/null
+++ b/themes/svg/tts.svg
@@ -0,0 +1,7 @@
+
+
\ No newline at end of file
diff --git a/themes/svg/vt.svg b/themes/svg/vt.svg
new file mode 100644
index 00000000..22358ad1
--- /dev/null
+++ b/themes/svg/vt.svg
@@ -0,0 +1,8 @@
+
+
\ No newline at end of file
diff --git a/themes/tts.js b/themes/tts.js
new file mode 100644
index 00000000..d9ae219a
--- /dev/null
+++ b/themes/tts.js
@@ -0,0 +1,351 @@
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+// TTS语音生成函数
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+audio_debug = false;
+class AudioPlayer {
+ constructor() {
+ this.audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+ this.queue = [];
+ this.isPlaying = false;
+ this.currentSource = null; // 添加属性来保存当前播放的源
+ }
+
+ // Base64 编码的字符串转换为 ArrayBuffer
+ base64ToArrayBuffer(base64) {
+ const binaryString = window.atob(base64);
+ const len = binaryString.length;
+ const bytes = new Uint8Array(len);
+ for (let i = 0; i < len; i++) {
+ bytes[i] = binaryString.charCodeAt(i);
+ }
+ return bytes.buffer;
+ }
+
+ // 检查音频播放队列并播放音频
+ checkQueue() {
+ if (!this.isPlaying && this.queue.length > 0) {
+ this.isPlaying = true;
+ const nextAudio = this.queue.shift();
+ this.play_wave(nextAudio);
+ }
+ }
+
+ // 将音频添加到播放队列
+ enqueueAudio(audio_buf_wave) {
+ if (allow_auto_read_tts_flag) {
+ this.queue.push(audio_buf_wave);
+ this.checkQueue();
+ }
+ }
+
+ // 播放音频
+ async play_wave(encodedAudio) {
+ //const audioData = this.base64ToArrayBuffer(encodedAudio);
+ const audioData = encodedAudio;
+ try {
+ const buffer = await this.audioCtx.decodeAudioData(audioData);
+ const source = this.audioCtx.createBufferSource();
+ source.buffer = buffer;
+ source.connect(this.audioCtx.destination);
+ source.onended = () => {
+ if (allow_auto_read_tts_flag) {
+ this.isPlaying = false;
+ this.currentSource = null; // 播放结束后清空当前源
+ this.checkQueue();
+ }
+ };
+ this.currentSource = source; // 保存当前播放的源
+ source.start();
+ } catch (e) {
+ console.log("Audio error!", e);
+ this.isPlaying = false;
+ this.currentSource = null; // 出错时也应清空当前源
+ this.checkQueue();
+ }
+ }
+
+ // 新增:立即停止播放音频的方法
+ stop() {
+ if (this.currentSource) {
+ this.queue = []; // 清空队列
+ this.currentSource.stop(); // 停止当前源
+ this.currentSource = null; // 清空当前源
+ this.isPlaying = false; // 更新播放状态
+ // 关闭音频上下文可能会导致无法再次播放音频,因此仅停止当前源
+ // this.audioCtx.close(); // 可选:如果需要可以关闭音频上下文
+ }
+ }
+}
+
+const audioPlayer = new AudioPlayer();
+
+class FIFOLock {
+ constructor() {
+ this.queue = [];
+ this.currentTaskExecuting = false;
+ }
+
+ lock() {
+ let resolveLock;
+ const lock = new Promise(resolve => {
+ resolveLock = resolve;
+ });
+
+ this.queue.push(resolveLock);
+
+ if (!this.currentTaskExecuting) {
+ this._dequeueNext();
+ }
+
+ return lock;
+ }
+
+ _dequeueNext() {
+ if (this.queue.length === 0) {
+ this.currentTaskExecuting = false;
+ return;
+ }
+ this.currentTaskExecuting = true;
+ const resolveLock = this.queue.shift();
+ resolveLock();
+ }
+
+ unlock() {
+ this.currentTaskExecuting = false;
+ this._dequeueNext();
+ }
+}
+
+
+
+
+
+
+
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+// Define the trigger function with delay parameter T in milliseconds
+function trigger(T, fire) {
+ // Variable to keep track of the timer ID
+ let timeoutID = null;
+ // Variable to store the latest arguments
+ let lastArgs = null;
+
+ return function (...args) {
+ // Update lastArgs with the latest arguments
+ lastArgs = args;
+ // Clear the existing timer if the function is called again
+ if (timeoutID !== null) {
+ clearTimeout(timeoutID);
+ }
+ // Set a new timer that calls the `fire` function with the latest arguments after T milliseconds
+ timeoutID = setTimeout(() => {
+ fire(...lastArgs);
+ }, T);
+ };
+}
+
+
+prev_text = ""; // previous text, this is used to check chat changes
+prev_text_already_pushed = ""; // previous text already pushed to audio, this is used to check where we should continue to play audio
+prev_chatbot_index = -1;
+const delay_live_text_update = trigger(3000, on_live_stream_terminate);
+
+function on_live_stream_terminate(latest_text) {
+ // remove `prev_text_already_pushed` from `latest_text`
+ if (audio_debug) console.log("on_live_stream_terminate", latest_text);
+ remaining_text = latest_text.slice(prev_text_already_pushed.length);
+ if ((!isEmptyOrWhitespaceOnly(remaining_text)) && remaining_text.length != 0) {
+ prev_text_already_pushed = latest_text;
+ push_text_to_audio(remaining_text);
+ }
+}
+function is_continue_from_prev(text, prev_text) {
+ abl = 5
+ if (text.length < prev_text.length - abl) {
+ return false;
+ }
+ if (prev_text.length > 10) {
+ return text.startsWith(prev_text.slice(0, Math.min(prev_text.length - abl, 100)));
+ } else {
+ return text.startsWith(prev_text);
+ }
+}
+function isEmptyOrWhitespaceOnly(remaining_text) {
+ // Replace \n and 。 with empty strings
+ let textWithoutSpecifiedCharacters = remaining_text.replace(/[\n。]/g, '');
+ // Check if the remaining string is empty
+ return textWithoutSpecifiedCharacters.trim().length === 0;
+}
+function process_increased_text(remaining_text) {
+ // console.log('[is continue], remaining_text: ', remaining_text)
+ // remaining_text starts with \n or 。, then move these chars into prev_text_already_pushed
+ while (remaining_text.startsWith('\n') || remaining_text.startsWith('。')) {
+ prev_text_already_pushed = prev_text_already_pushed + remaining_text[0];
+ remaining_text = remaining_text.slice(1);
+ }
+ if (remaining_text.includes('\n') || remaining_text.includes('。')) { // determine remaining_text contain \n or 。
+ // new message begin!
+ index_of_last_sep = Math.max(remaining_text.lastIndexOf('\n'), remaining_text.lastIndexOf('。'));
+ // break the text into two parts
+ tobe_pushed = remaining_text.slice(0, index_of_last_sep + 1);
+ prev_text_already_pushed = prev_text_already_pushed + tobe_pushed;
+ // console.log('[is continue], push: ', tobe_pushed)
+ // console.log('[is continue], update prev_text_already_pushed: ', prev_text_already_pushed)
+ if (!isEmptyOrWhitespaceOnly(tobe_pushed)) {
+ // console.log('[is continue], remaining_text is empty')
+ push_text_to_audio(tobe_pushed);
+ }
+ }
+}
+function process_latest_text_output(text, chatbot_index) {
+ if (text.length == 0) {
+ prev_text = text;
+        prev_text_mask = text; // NOTE(review): prev_text_mask is never read elsewhere — likely leftover, confirm before removing
+ // console.log('empty text')
+ return;
+ }
+ if (text == prev_text) {
+ // console.log('[nothing changed]')
+ return;
+ }
+
+ var is_continue = is_continue_from_prev(text, prev_text_already_pushed);
+ if (chatbot_index == prev_chatbot_index && is_continue) {
+ // on_text_continue_grow
+ remaining_text = text.slice(prev_text_already_pushed.length);
+ process_increased_text(remaining_text);
+ delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
+ }
+ else if (chatbot_index == prev_chatbot_index && !is_continue) {
+ if (audio_debug) console.log('---------------------');
+ if (audio_debug) console.log('text twisting!');
+ if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
+ if (audio_debug) console.log('---------------------');
+ prev_text_already_pushed = "";
+ delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
+ }
+ else {
+ // on_new_message_begin, we have to clear `prev_text_already_pushed`
+ if (audio_debug) console.log('---------------------');
+ if (audio_debug) console.log('new message begin!');
+ if (audio_debug) console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
+ if (audio_debug) console.log('---------------------');
+ prev_text_already_pushed = "";
+ process_increased_text(text);
+ delay_live_text_update(text); // in case of no \n or 。 in the text, this timer will finally commit
+ }
+ prev_text = text;
+ prev_chatbot_index = chatbot_index;
+}
+
+const audio_push_lock = new FIFOLock();
+async function push_text_to_audio(text) {
+ if (!allow_auto_read_tts_flag) {
+ return;
+ }
+ await audio_push_lock.lock();
+ var lines = text.split(/[\n。]/);
+ for (const audio_buf_text of lines) {
+ if (audio_buf_text) {
+ // Append '/vits' to the current URL to form the target endpoint
+ const url = `${window.location.href}vits`;
+ // Define the payload to be sent in the POST request
+ const payload = {
+ text: audio_buf_text, // Ensure 'audio_buf_text' is defined with valid data
+ text_language: "zh"
+ };
+ // Call the async postData function and log the response
+ post_text(url, payload, send_index);
+ send_index = send_index + 1;
+ if (audio_debug) console.log(send_index, audio_buf_text);
+            // sleep 3 seconds between TTS requests
+ if (allow_auto_read_tts_flag) {
+ await delay(3000);
+ }
+ }
+ }
+ audio_push_lock.unlock();
+}
+
+
+send_index = 0;
+recv_index = 0;
+to_be_processed = [];
+async function UpdatePlayQueue(cnt, audio_buf_wave) {
+ if (cnt != recv_index) {
+ to_be_processed.push([cnt, audio_buf_wave]);
+ if (audio_debug) console.log('cache', cnt);
+ }
+ else {
+ if (audio_debug) console.log('processing', cnt);
+ recv_index = recv_index + 1;
+ if (audio_buf_wave) {
+ audioPlayer.enqueueAudio(audio_buf_wave);
+ }
+ // deal with other cached audio
+ while (true) {
+ find_any = false;
+ for (i = to_be_processed.length - 1; i >= 0; i--) {
+ if (to_be_processed[i][0] == recv_index) {
+ if (audio_debug) console.log('processing cached', recv_index);
+ if (to_be_processed[i][1]) {
+ audioPlayer.enqueueAudio(to_be_processed[i][1]);
+ }
+                to_be_processed.splice(i, 1); // Array.pop() takes no index; splice removes the matched entry
+ find_any = true;
+ recv_index = recv_index + 1;
+ }
+ }
+ if (!find_any) { break; }
+ }
+ }
+}
+
+function post_text(url, payload, cnt) {
+ if (allow_auto_read_tts_flag) {
+ postData(url, payload, cnt)
+ .then(data => {
+ UpdatePlayQueue(cnt, data);
+ return;
+ });
+ } else {
+ UpdatePlayQueue(cnt, null);
+ return;
+ }
+}
+
+notify_user_error = false
+// Create an async function to perform the POST request
+async function postData(url = '', data = {}) {
+ try {
+ // Use the Fetch API with await
+ const response = await fetch(url, {
+ method: 'POST', // Specify the request method
+ body: JSON.stringify(data), // Convert the JavaScript object to a JSON string
+ });
+ // Check if the response is ok (status in the range 200-299)
+ if (!response.ok) {
+ // If not OK, throw an error
+ console.info('There was a problem during audio generation requests:', response.status);
+ // if (!notify_user_error){
+ // notify_user_error = true;
+ // alert('There was a problem during audio generation requests:', response.status);
+ // }
+ return null;
+ }
+ // If OK, parse and return the JSON response
+ return await response.arrayBuffer();
+ } catch (error) {
+ // Log any errors that occur during the fetch operation
+ console.info('There was a problem during audio generation requests:', error);
+ // if (!notify_user_error){
+ // notify_user_error = true;
+ // alert('There was a problem during audio generation requests:', error);
+ // }
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/themes/welcome.js b/themes/welcome.js
new file mode 100644
index 00000000..a9baea6e
--- /dev/null
+++ b/themes/welcome.js
@@ -0,0 +1,271 @@
+class WelcomeMessage {
+ constructor() {
+ this.static_welcome_message = [
+ {
+ title: "环境配置教程",
+ content: "配置模型和插件,释放大语言模型的学术应用潜力。",
+ svg: "file=themes/svg/conf.svg",
+ url: "https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E",
+ },
+ {
+ title: "Arxiv论文一键翻译",
+ content: "无缝切换学术阅读语言,最优英文转中文的学术论文阅读体验。",
+ svg: "file=themes/svg/arxiv.svg",
+ url: "https://www.bilibili.com/video/BV1dz4y1v77A/",
+ },
+ {
+ title: "多模态模型",
+ content: "试试将截屏直接粘贴到输入框中,随后使用多模态模型提问。",
+ svg: "file=themes/svg/mm.svg",
+ url: "https://github.com/binary-husky/gpt_academic",
+ },
+ {
+ title: "文档与源码批处理",
+ content: "您可以将任意文件拖入「此处」,随后调用对应插件功能。",
+ svg: "file=themes/svg/doc.svg",
+ url: "https://github.com/binary-husky/gpt_academic",
+ },
+ {
+ title: "图表与脑图绘制",
+ content: "试试输入一段语料,然后点击「总结绘制脑图」。",
+ svg: "file=themes/svg/brain.svg",
+ url: "https://www.bilibili.com/video/BV18c41147H9/",
+ },
+ {
+ title: "虚空终端",
+ content: "点击右侧插件区的「虚空终端」插件,然后直接输入您的想法。",
+ svg: "file=themes/svg/vt.svg",
+ url: "https://github.com/binary-husky/gpt_academic",
+ },
+ {
+ title: "DALLE图像生成",
+ content: "接入DALLE生成插画或者项目Logo,辅助头脑风暴并激发灵感。",
+ svg: "file=themes/svg/img.svg",
+ url: "https://github.com/binary-husky/gpt_academic",
+ },
+ {
+ title: "TTS语音克隆",
+ content: "借助SoVits,以您喜爱的角色的声音回答问题。",
+ svg: "file=themes/svg/tts.svg",
+ url: "https://www.bilibili.com/video/BV1Rp421S7tF/",
+ },
+ {
+ title: "实时语音对话",
+ content: "配置实时语音对话功能,无须任何激活词,我将一直倾听。",
+ svg: "file=themes/svg/default.svg",
+ url: "https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md",
+ },
+ {
+ title: "Latex全文润色",
+ content: "上传需要润色的latex论文,让大语言模型帮您改论文。",
+ svg: "file=themes/svg/polish.svg",
+ url: "https://github.com/binary-husky/gpt_academic",
+ }
+ ];
+ this.visible = false;
+ this.max_welcome_card_num = 6;
+ this.card_array = [];
+ this.static_welcome_message_previous = [];
+ this.reflesh_time_interval = 15*1000;
+ }
+
+ begin_render() {
+ this.update();
+ }
+
+ async startRefleshCards() {
+ await new Promise(r => setTimeout(r, this.reflesh_time_interval));
+ await this.reflesh_cards();
+ if (this.visible){
+ setTimeout(() => {
+ this.startRefleshCards.call(this);
+ }, 1);
+ }
+ }
+
+ async reflesh_cards() {
+ if (!this.visible){
+ return;
+ }
+
+ // re-rank this.static_welcome_message randomly
+ this.static_welcome_message_temp = this.shuffle(this.static_welcome_message);
+
+ // find items that in this.static_welcome_message_temp but not in this.static_welcome_message_previous
+ const not_shown_previously = this.static_welcome_message_temp.filter(item => !this.static_welcome_message_previous.includes(item));
+ const already_shown_previously = this.static_welcome_message_temp.filter(item => this.static_welcome_message_previous.includes(item));
+
+ // combine two lists
+ this.static_welcome_message_previous = not_shown_previously.concat(already_shown_previously);
+
+ (async () => {
+ // 使用 for...of 循环来处理异步操作
+ for (let index = 0; index < this.card_array.length; index++) {
+ if (index >= this.max_welcome_card_num) {
+ break;
+ }
+
+ const card = this.card_array[index];
+ card.classList.remove('hide');
+ card.classList.remove('show');
+
+ // 等待动画结束
+ card.addEventListener('transitionend', () => {
+ // 更新卡片信息
+ const message = this.static_welcome_message_previous[index];
+ const title = card.getElementsByClassName('welcome-card-title')[0];
+ const content = card.getElementsByClassName('welcome-content-c')[0];
+ const svg = card.getElementsByClassName('welcome-svg')[0];
+ const text = card.getElementsByClassName('welcome-title-text')[0];
+ svg.src = message.svg;
+ text.textContent = message.title;
+ text.href = message.url;
+ content.textContent = message.content;
+ card.classList.remove('hide');
+
+ // 等待动画结束
+ card.addEventListener('transitionend', () => {
+ card.classList.remove('show');
+ }, { once: true });
+ card.classList.add('show');
+
+ }, { once: true });
+
+ card.classList.add('hide');
+
+            // wait 200 ms between card transitions
+ await new Promise(r => setTimeout(r, 200));
+ }
+ })();
+ }
+
+ shuffle(array) {
+ var currentIndex = array.length, randomIndex;
+
+ // While there remain elements to shuffle...
+ while (currentIndex != 0) {
+
+ // Pick a remaining element...
+ randomIndex = Math.floor(Math.random() * currentIndex);
+ currentIndex--;
+
+ // And swap it with the current element.
+ [array[currentIndex], array[randomIndex]] = [
+ array[randomIndex], array[currentIndex]];
+ }
+
+ return array;
+ }
+
+ async update() {
+ console.log('update')
+ var page_width = document.documentElement.clientWidth;
+ const width_to_hide_welcome = 1200;
+ if (!await this.isChatbotEmpty() || page_width < width_to_hide_welcome) {
+ if (this.visible) {
+ this.removeWelcome();
+ this.visible = false;
+ this.card_array = [];
+ this.static_welcome_message_previous = [];
+ }
+ return;
+ }
+ if (this.visible){
+ return;
+ }
+ // console.log("welcome");
+ this.showWelcome();
+ this.visible = true;
+ this.startRefleshCards();
+ }
+
+ showCard(message) {
+ const card = document.createElement('div');
+ card.classList.add('welcome-card');
+
+ // 创建标题
+ const title = document.createElement('div');
+ title.classList.add('welcome-card-title');
+
+ // 创建图标
+ const svg = document.createElement('img');
+ svg.classList.add('welcome-svg');
+ svg.src = message.svg;
+ svg.style.height = '30px';
+ title.appendChild(svg);
+
+ // 创建标题
+ const text = document.createElement('a');
+ text.textContent = message.title;
+ text.classList.add('welcome-title-text');
+ text.href = message.url;
+ text.target = "_blank";
+ title.appendChild(text)
+
+ // 创建内容
+ const content = document.createElement('div');
+ content.classList.add('welcome-content');
+ const content_c = document.createElement('div');
+ content_c.classList.add('welcome-content-c');
+ content_c.textContent = message.content;
+ content.appendChild(content_c);
+
+ // 将标题和内容添加到卡片 div 中
+ card.appendChild(title);
+ card.appendChild(content);
+ return card;
+ }
+
+ async showWelcome() {
+
+ // 首先,找到想要添加子元素的父元素
+ const elem_chatbot = document.getElementById('gpt-chatbot');
+
+ // 创建一个新的div元素
+ const welcome_card_container = document.createElement('div');
+ welcome_card_container.classList.add('welcome-card-container');
+
+ // 创建主标题
+ const major_title = document.createElement('div');
+ major_title.classList.add('welcome-title');
+ major_title.textContent = "欢迎使用GPT-Academic";
+ welcome_card_container.appendChild(major_title)
+
+ // 创建卡片
+ this.static_welcome_message.forEach((message, index) => {
+ if (index >= this.max_welcome_card_num) {
+ return;
+ }
+ this.static_welcome_message_previous.push(message);
+ const card = this.showCard(message);
+ this.card_array.push(card);
+ welcome_card_container.appendChild(card);
+ });
+
+ elem_chatbot.appendChild(welcome_card_container);
+
+ // 添加显示动画
+ requestAnimationFrame(() => {
+ welcome_card_container.classList.add('show');
+ });
+ }
+
+ async removeWelcome() {
+ // remove welcome-card-container
+ const elem_chatbot = document.getElementById('gpt-chatbot');
+ const welcome_card_container = document.getElementsByClassName('welcome-card-container')[0];
+ // 添加隐藏动画
+ welcome_card_container.classList.add('hide');
+ // 等待动画结束后再移除元素
+ welcome_card_container.addEventListener('transitionend', () => {
+ elem_chatbot.removeChild(welcome_card_container);
+ }, { once: true });
+ }
+
+ async isChatbotEmpty() {
+ return (await get_data_from_gradio_component("gpt-chatbot")).length == 0;
+ }
+
+
+}
+
diff --git a/version b/version
index dd14ea0f..eea15c46 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
{
- "version": 3.82,
+ "version": 3.83,
"show_feature": true,
"new_feature": "优化图像生成插件 <-> 添加紫东太初大模型支持 <-> 保留主题选择 <-> 支持更复杂的插件框架 <-> 上传文件时显示进度条"
}