Mirrored from
https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
typo: Fix typos and rename functions across multiple files (#2130)
* typo: Fix typos and rename functions across multiple files

This commit addresses several minor issues:

- Corrected spelling of function names (e.g., `update_ui_lastest_msg` to `update_ui_latest_msg`)
- Fixed typos in comments and variable names
- Corrected capitalization in some strings (e.g., "ArXiv" instead of "Arixv")
- Renamed some variables for consistency
- Corrected some console-related parameter names (e.g., `console_slience` to `console_silence`)

The changes span multiple files across the project, including request LLM bridges, crazy functions, and utility modules.

* fix: f-string expression part cannot include a backslash (#2139)
* raise error when the uploaded tar contain hard/soft link (#2136)
* minor bug fix
* fine tune reasoning css
* upgrade internet gpt plugin
* Update README.md
* fix GHSA-gqp5-wm97-qxcv
* typo fix
* update readme

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
Co-authored-by: binary-husky <qingxu.fu@outlook.com>
This commit is contained in:
@@ -91,7 +91,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         inputs is the input of the current query
         top_p, temperature are chatGPT's internal tuning parameters
         history is the list of previous conversations (note that if either inputs or history is too long, a token-overflow error will be triggered)
-        chatbot is the list of conversations displayed in the WebUI; modify it and then yeild it out to update the conversation view directly
+        chatbot is the list of conversations displayed in the WebUI; modify it and then yield it out to update the conversation view directly
         additional_fn indicates which button was clicked; see functional.py for the buttons
     """
     if additional_fn is not None:
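As background for the docstring above: the predict functions in this repo are generators, and each yield hands the mutated chatbot list back to the WebUI for re-rendering. Below is a minimal sketch of that protocol; the body is illustrative only (the hard-coded token list stands in for a streamed LLM reply, and is not the repository's implementation):

```python
import time

def predict(inputs, chatbot, history):
    # Append the new user turn with an empty reply slot.
    chatbot.append((inputs, ""))
    yield chatbot, history  # the WebUI re-renders on every yield

    response = ""
    for token in ["Hello", ", ", "world"]:  # stand-in for a streamed LLM reply
        time.sleep(0.1)
        response += token
        chatbot[-1] = (inputs, response)  # overwrite the last turn in place
        yield chatbot, history

    history.extend([inputs, response])
    yield chatbot, history
```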
@@ -112,7 +112,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp


     mutable = ["", time.time()]
-    def run_coorotine(mutable):
+    def run_coroutine(mutable):
         async def get_result(mutable):
             # "tgui:galactica-1.3b@localhost:7860"

@@ -126,7 +126,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                     break
         asyncio.run(get_result(mutable))

-    thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True)
+    thread_listen = threading.Thread(target=run_coroutine, args=(mutable,), daemon=True)
     thread_listen.start()

     while thread_listen.is_alive():
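The two hunks above are the heart of the renamed helper: the text-generation-webui client is an async generator, so it is driven by asyncio.run inside a daemon thread while the caller polls a shared two-slot list (accumulated text, last-update timestamp). A self-contained sketch of that pattern, where fake_stream is a stand-in for the real tgui client:

```python
import asyncio
import threading
import time

async def fake_stream(mutable):
    # Stand-in for the real async token stream from text-generation-webui.
    for chunk in ["type", " as", " you", " go"]:
        await asyncio.sleep(0.2)
        mutable[0] += chunk        # slot 0: accumulated text
        mutable[1] = time.time()   # slot 1: last-update timestamp

mutable = ["", time.time()]

def run_coroutine(mutable):
    # asyncio.run creates a fresh event loop inside this worker thread,
    # so the calling thread never blocks on the stream.
    asyncio.run(fake_stream(mutable))

thread_listen = threading.Thread(target=run_coroutine, args=(mutable,), daemon=True)
thread_listen.start()

while thread_listen.is_alive():
    time.sleep(0.1)
    print("partial result:", repr(mutable[0]))
```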
@@ -142,7 +142,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp



-def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence=False):
     raw_input = "What I would like to say is the following: " + inputs
     prompt = raw_input
     tgui_say = ""
@@ -151,7 +151,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
     addr, port = addr_port.split(':')


-    def run_coorotine(observe_window):
+    def run_coroutine(observe_window):
         async def get_result(observe_window):
             async for response in run(context=prompt, max_token=llm_kwargs['max_length'],
                                       temperature=llm_kwargs['temperature'],
@@ -162,6 +162,6 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
                     print('exit when no listener')
                     break
         asyncio.run(get_result(observe_window))
-    thread_listen = threading.Thread(target=run_coorotine, args=(observe_window,))
+    thread_listen = threading.Thread(target=run_coroutine, args=(observe_window,))
     thread_listen.start()
     return observe_window[0]
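For completeness, a hedged sketch of how a caller might drive predict_no_ui_long_connection: by the observe_window convention visible in the diff ("exit when no listener"), slot 0 carries the partial reply out and slot 1 is a watchdog timestamp the listener keeps refreshing. The stub below replaces the real bridge function, and the polling interval is an assumption:

```python
import threading
import time

def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt,
                                  observe_window, console_silence=False):
    # Stub standing in for the real bridge function shown in the diff above.
    for chunk in ("thinking", " ...", " done"):
        time.sleep(0.3)
        observe_window[0] += chunk

def ask_in_background(inputs):
    # Slot 0 collects the partial reply; slot 1 is the watchdog timestamp
    # the caller keeps refreshing to signal "I'm still listening".
    observe_window = ["", time.time()]
    worker = threading.Thread(
        target=predict_no_ui_long_connection,
        args=(inputs, {}, [], "", observe_window),
        daemon=True,
    )
    worker.start()
    while worker.is_alive():
        observe_window[1] = time.time()  # feed the watchdog
        print("partial:", repr(observe_window[0]))
        time.sleep(0.2)
    return observe_window[0]

print(ask_in_background("hello"))
```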