Merge Latest Frontier (#1991)

* logging system to loguru: stage 1 complete

* import loguru: stage 2

* logging -> loguru: stage 3

* support o1-preview and o1-mini

* logging -> loguru stage 4

* update social helper

* logging -> loguru: final stage

* fix: console output

* update translation matrix

* fix: loguru argument error with proxy enabled (#1977)

* relax llama index version

* remove comment

* Added some modules to support OpenRouter (#1975)

* Added some modules to support the OpenRouter model

* Update config.py

* Update .gitignore

* Update bridge_openrouter.py

* Not actually changed

* Refactor logging in bridge_openrouter.py

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>

* remove logging extra

---------

Co-authored-by: Steven Moder <java20131114@gmail.com>
Co-authored-by: Ren Lifei <2602264455@qq.com>
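
Taken together, the logging commits above perform one drop-in substitution across the codebase: stdlib logging calls and ad-hoc prints become calls on loguru's ready-made logger. A minimal sketch of the pattern, with illustrative variable values (not taken verbatim from this diff):

# Before: stdlib logging plus ad-hoc prints (illustrative values)
import logging

result = "hello"
error_msg = "something went wrong"
logging.info(f"[response] {result}")   # invisible unless logging.basicConfig was called
print(error_msg)                       # errors bypass the logging pipeline entirely

# After: loguru ships a preconfigured logger object; the call shape is unchanged
from loguru import logger

logger.info(f"[response] {result}")    # timestamped, leveled output out of the box
logger.error(error_msg)                # errors now carry a level and a timestamp
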
This commit is contained in:
binary-husky
2024-10-05 17:09:18 +08:00
Committed by GitHub
Parent 597c320808
Current commit a01ca93362
91 files changed, with 2558 insertions and 742 deletions


@@ -1,8 +1,8 @@
 import json
 import time
-import logging
 import traceback
 import requests
+from loguru import logger
 # config_private.py holds your own secrets, such as API keys and proxy URLs
 # On load, a private config_private file (not tracked by git) is checked first; if present, it overrides the original config file
@@ -106,10 +106,7 @@ def generate_message(input, model, key, history, max_output_token, system_prompt
         "stream": True,
         "max_tokens": max_output_token,
     }
-    try:
-        print(f" {model} : {conversation_cnt} : {input[:100]} ..........")
-    except:
-        print("输入中可能存在乱码。")
     return headers, playload
@@ -196,7 +193,7 @@ def get_predict_function(
                 if retry > MAX_RETRY:
                     raise TimeoutError
                 if MAX_RETRY != 0:
-                    print(f"请求超时,正在重试 ({retry}/{MAX_RETRY}) ……")
+                    logger.error(f"请求超时,正在重试 ({retry}/{MAX_RETRY}) ……")
         stream_response = response.iter_lines()
         result = ""
@@ -219,18 +216,17 @@ def get_predict_function(
                 ):
                     chunk = get_full_error(chunk, stream_response)
                     chunk_decoded = chunk.decode()
-                    print(chunk_decoded)
+                    logger.error(chunk_decoded)
                     raise RuntimeError(
                         f"API异常,请检测终端输出。可能的原因是:{finish_reason}"
                     )
                 if chunk:
                     try:
                         if finish_reason == "stop":
-                            logging.info(f"[response] {result}")
                             if not console_slience:
                                 print(f"[response] {result}")
                             break
                         result += response_text
                         if not console_slience:
                             print(response_text, end="")
                         if observe_window is not None:
                             # observation window: show the data received so far
                             if len(observe_window) >= 1:
@@ -243,7 +239,7 @@ def get_predict_function(
                 chunk = get_full_error(chunk, stream_response)
                 chunk_decoded = chunk.decode()
                 error_msg = chunk_decoded
-                print(error_msg)
+                logger.error(error_msg)
                 raise RuntimeError("Json解析不合常规")
         return result
@@ -276,7 +272,7 @@ def get_predict_function(
             inputs, history = handle_core_functionality(
                 additional_fn, inputs, history, chatbot
             )
-        logging.info(f"[raw_input] {inputs}")
+        logger.info(f"[raw_input] {inputs}")
         chatbot.append((inputs, ""))
         yield from update_ui(
             chatbot=chatbot, history=history, msg="等待响应"
@@ -376,11 +372,11 @@ def get_predict_function(
                             history=history,
                             msg="API异常:" + chunk_decoded,
                         )  # refresh the UI
-                        print(chunk_decoded)
+                        logger.error(chunk_decoded)
                         return
                     if finish_reason == "stop":
-                        logging.info(f"[response] {gpt_replying_buffer}")
+                        logger.info(f"[response] {gpt_replying_buffer}")
                         break
                 status_text = f"finish_reason: {finish_reason}"
                 gpt_replying_buffer += response_text
@@ -403,7 +399,7 @@ def get_predict_function(
                 yield from update_ui(
                     chatbot=chatbot, history=history, msg="Json异常" + chunk_decoded
                 )  # refresh the UI
-                print(chunk_decoded)
+                logger.error(chunk_decoded)
                 return
     return predict_no_ui_long_connection, predict
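
One side effect of the migration worth noting: loguru needs no basicConfig, and output destinations are attached as sinks on the shared logger object. A minimal, hypothetical configuration sketch (the file path, rotation size, and level are illustrative assumptions, not taken from this commit):

from loguru import logger

logger.remove()                  # drop loguru's default stderr sink
logger.add(
    "logs/app.log",              # hypothetical path, not from this repo
    rotation="10 MB",            # roll over once the file reaches 10 MB
    level="INFO",                # record INFO and above
)
logger.info("loguru sink configured")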