Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-07 15:06:48 +00:00
Compare commits
3 commits
version3.5 ... version3.5
| Author | SHA1 | Commit date |
|---|---|---|
| | 760ff1840c | |
| | 9905122fc2 | |
| | abea0d07ac | |
```diff
@@ -715,7 +715,7 @@ class nougat_interface():

     def nougat_with_timeout(self, command, cwd, timeout=3600):
         import subprocess
-        logging.info('正在执行命令', command)
+        logging.info(f'正在执行命令 {command}')
         process = subprocess.Popen(command, shell=True, cwd=cwd)
         try:
             stdout, stderr = process.communicate(timeout=timeout)
```
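The hunk above fixes a common `logging` pitfall: extra positional arguments are treated as lazy `%`-style format parameters, and since the message contains no placeholder, the record fails to render and logging reports an error instead of the intended line. A minimal repro of the before/after (the `command` string is invented for illustration):

```python
import logging
logging.basicConfig(level=logging.INFO)

command = "nougat paper.pdf -o ./out"  # hypothetical command string

# Buggy: `command` becomes a %-style argument, but the message has no %s,
# so rendering raises TypeError, which logging swallows and reports as
# "--- Logging error ---" on stderr; the intended line never appears.
logging.info('正在执行命令', command)

# Fixed: format eagerly with an f-string, leaving the args tuple empty.
logging.info(f'正在执行命令 {command}')
```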
```diff
@@ -291,7 +291,11 @@ def find_tex_file_ignore_case(fp):
     import glob
     for f in glob.glob(dir_name+'/*.tex'):
         base_name_s = os.path.basename(fp)
-        if base_name_s.lower() == base_name.lower(): return f
+        base_name_f = os.path.basename(f)
+        if base_name_s.lower() == base_name_f.lower(): return f
+        # try appending a .tex suffix
+        if not base_name_s.endswith('.tex'): base_name_s+='.tex'
+        if base_name_s.lower() == base_name_f.lower(): return f
     return None

 def merge_tex_files_(project_foler, main_file, mode):
```
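Worth noting about this hunk: the removed comparison never referenced the loop's candidate file `f` at all, so any query could match the first `*.tex` file that `glob` returned. After the patch, the case-insensitive fallback behaves roughly like the condensed sketch below (a rough equivalent with a hypothetical helper name, not the verbatim function; the exact-path checks earlier in the real `find_tex_file_ignore_case` are omitted):

```python
import glob
import os

def find_tex_file_case_fallback(fp):
    """Condensed equivalent of the patched loop (hypothetical helper name)."""
    dir_name = os.path.dirname(fp)
    base_name_s = os.path.basename(fp)
    for f in glob.glob(dir_name + '/*.tex'):
        base_name_f = os.path.basename(f)
        # case-insensitive match against each candidate's basename
        if base_name_s.lower() == base_name_f.lower():
            return f
        # retry with a '.tex' suffix appended to the query
        if not base_name_s.endswith('.tex'):
            if (base_name_s + '.tex').lower() == base_name_f.lower():
                return f
    return None

# e.g. a query of 'project/Sections/Intro' finds 'project/Sections/intro.tex'
```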
```diff
@@ -302,9 +306,9 @@ def merge_tex_files_(project_foler, main_file, mode):
     for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
         f = s.group(1)
         fp = os.path.join(project_foler, f)
-        fp = find_tex_file_ignore_case(fp)
-        if fp:
-            with open(fp, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
+        fp_ = find_tex_file_ignore_case(fp)
+        if fp_:
+            with open(fp_, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
         else:
             raise RuntimeError(f'找不到{fp},Tex源文件缺失!')
         c = merge_tex_files_(project_foler, c, mode)
```
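The `fp_` rename in this hunk matters for the error path: the old code overwrote `fp` with the lookup result, so when the lookup failed, the exception message rendered as `找不到None` instead of naming the missing file. A sketch of the patched flow, with a stand-in for the lookup so the failure branch is reachable (the `\input` target is hypothetical; `project_foler` keeps the source's spelling):

```python
import os

def find_tex_file_ignore_case(fp):
    # stand-in for the real lookup: pretend the file cannot be resolved
    return None

project_foler = "project"            # spelling as in the source
f = "Sections/Intro"                 # hypothetical \input{...} target
fp = os.path.join(project_foler, f)

fp_ = find_tex_file_ignore_case(fp)  # may be None; fp stays untouched
if fp_:
    with open(fp_, 'r', encoding='utf-8', errors='replace') as fx:
        c = fx.read()
else:
    # because fp was not overwritten, the message names the missing file
    # instead of rendering as '找不到None'
    raise RuntimeError(f'找不到{fp},Tex源文件缺失!')
```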
```diff
@@ -72,6 +72,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",

     stream_response = response.iter_lines()
     result = ''
+    json_data = None
     while True:
         try: chunk = next(stream_response).decode()
         except StopIteration:
```
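This one-line initialization pairs with the `json_data and …` guards added in the next hunk: together they cover streams that terminate before a single parsable chunk arrives, as the sketch after that hunk shows.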
```diff
@@ -90,20 +91,21 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
         delta = json_data["delta"]
         if len(delta) == 0: break
         if "role" in delta: continue
         if "content" in delta:
             result += delta["content"]
             if not console_slience: print(delta["content"], end='')
             if observe_window is not None:
                 # observation window: relay the data received so far
-                if len(observe_window) >= 1: observe_window[0] += delta["content"]
+                if len(observe_window) >= 1:
+                    observe_window[0] += delta["content"]
                 # watchdog: terminate if it is not fed within the deadline
                 if len(observe_window) >= 2:
                     if (time.time()-observe_window[1]) > watch_dog_patience:
                         raise RuntimeError("用户取消了程序。")
         else: raise RuntimeError("意外Json结构:"+delta)
-    if json_data['finish_reason'] == 'content_filter':
+    if json_data and json_data['finish_reason'] == 'content_filter':
         raise RuntimeError("由于提问含不合规内容被Azure过滤。")
-    if json_data['finish_reason'] == 'length':
+    if json_data and json_data['finish_reason'] == 'length':
         raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
     return result
```
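Why the guard is needed: if the response stream ends before a single chunk is parsed, `json_data` keeps the `None` default introduced in the previous hunk, and the old unguarded subscript would crash. A self-contained illustration (the empty stream is contrived; names follow the patched function):

```python
json_data = None                      # default added by the earlier hunk

def fake_stream():
    # simulates a connection that closes before sending any usable chunk
    return iter([])

for _chunk in fake_stream():
    json_data = {"delta": {"content": "hi"}, "finish_reason": "stop"}

# Old code: json_data['finish_reason'] raises
#   TypeError: 'NoneType' object is not subscriptable
# New code: the `json_data and ...` guard short-circuits safely.
if json_data and json_data["finish_reason"] == "content_filter":
    raise RuntimeError("filtered")
print("stream ended without any chunk; no crash")
```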
```diff
@@ -205,7 +207,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         chunkjson = json.loads(chunk_decoded[6:])
         status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
         # if an exception is raised here, the text is usually too long; see get_full_error's output for details
-        gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]
+        gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
         history[-1] = gpt_replying_buffer
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
```
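The final hunk is a small cleanup: each server-sent chunk was being JSON-decoded twice, once for `status_text` and once for the reply buffer; reusing the already-parsed `chunkjson` does the same work with a single parse. A minimal before/after (the payload is a made-up OpenAI-style delta):

```python
import json

chunk_decoded = 'data: {"choices": [{"delta": {"content": "hi"}}]}'

# before: the payload after the 'data: ' prefix is parsed a second time
buffer_old = "" + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]

# after: reuse the object parsed once at the top of the handler
chunkjson = json.loads(chunk_decoded[6:])
buffer_new = "" + chunkjson['choices'][0]["delta"]["content"]

assert buffer_old == buffer_new == "hi"
```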