diff --git a/Dockerfile b/Dockerfile
index da5053db..19d988f6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -9,12 +9,16 @@ RUN echo '[global]' > /etc/pip.conf && \
 
 WORKDIR /gpt
 
-COPY requirements.txt .
+
+# Copy the project files into the image
+COPY . .
+
+# Install dependencies
 RUN pip3 install -r requirements.txt
-COPY . .
 
 # Optional step, used to warm up the modules
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
 
+# Launch
 CMD ["python3", "-u", "main.py"]
diff --git a/crazy_functions/Latex全文润色.py b/crazy_functions/Latex全文润色.py
index 1175e866..8d3f97b5 100644
--- a/crazy_functions/Latex全文润色.py
+++ b/crazy_functions/Latex全文润色.py
@@ -66,7 +66,7 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
         with open(fp, 'r', encoding='utf-8', errors='replace') as f:
             file_content = f.read()
             # Define the regular expression for comments
-            comment_pattern = r'%.*'
+            comment_pattern = r'(?<!\\)%.*'
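As an aside (not part of the diff): the new pattern adds a negative lookbehind so that an escaped `\%`, which is a literal percent sign in LaTeX, is no longer mistaken for the start of a comment. A minimal sketch with a made-up input line:

```python
import re

old_pattern = r'%.*'         # old: matches any %, even an escaped \%
new_pattern = r'(?<!\\)%.*'  # new: skips a % preceded by a backslash

line = r'accuracy rose by 12\% % TODO: cite the source'

print(re.sub(old_pattern, '', line))  # 'accuracy rose by 12\' -- the literal \% is clobbered
print(re.sub(new_pattern, '', line))  # 'accuracy rose by 12\% ' -- only the real comment is stripped
```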
diff --git a/docker-compose.yml b/docker-compose.yml
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ ... @@
       bash -c " echo '[gpt-academic] Pulling the latest code from github...' &&
                 git pull &&
+                pip install -r requirements.txt &&
                 echo '[jittorllms] Pulling the latest code from github...' &&
                 git --git-dir=request_llm/jittorllms/.git --work-tree=request_llm/jittorllms pull --force &&
                 python3 -u main.py"
diff --git a/main.py b/main.py
index 88319d3f..7b974bf8 100644
--- a/main.py
+++ b/main.py
@@ -2,6 +2,7 @@ import os; os.environ['no_proxy'] = '*' # avoid accidental pollution by the proxy network
 
 def main():
     import gradio as gr
+    if gr.__version__ not in ['3.28.3','3.32.2']: assert False, "Please install the dependencies with pip install -r requirements.txt"
     from request_llm.bridge_all import predict
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
     # You are advised to copy your secrets, such as API keys and proxy URLs, into a config_private.py, to avoid accidentally pushing them to github
@@ -197,7 +198,7 @@ def main():
     threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
 
     auto_opentab_delay()
-    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png")
+    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png", blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
 
     # If you need to run under a secondary path
     # CUSTOM_PATH, = get_conf('CUSTOM_PATH')
@@ -205,7 +206,8 @@ def main():
     #     from toolbox import run_gradio_in_subpath
     #     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
     # else:
    -#     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
    +#     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
    +#                 blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
 
 if __name__ == "__main__":
     main()
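A note on the `blocked_paths` hardening above (an illustrative sketch, not part of the diff): gradio's built-in file server can be asked to serve readable files via its `/file=` route, and `blocked_paths` denylists the ones that must never leak. Assuming a gradio version that supports the parameter (the version pin above suggests 3.32.2):

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("demo app")

# Requests for the listed files via the /file= route now return an error
# instead of the file contents, keeping secrets in config_private.py private.
demo.queue().launch(
    server_name="0.0.0.0",
    blocked_paths=["config.py", "config_private.py"],
)
```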
diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index 5c0c8d53..b6efe21a 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -202,19 +202,22 @@ if "stack-claude" in AVAIL_LLM_MODELS:
         }
     })
 if "newbing-free" in AVAIL_LLM_MODELS:
-    from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
-    from .bridge_newbingfree import predict as newbingfree_ui
-    # claude
-    model_info.update({
-        "newbing-free": {
-            "fn_with_ui": newbingfree_ui,
-            "fn_without_ui": newbingfree_noui,
-            "endpoint": newbing_endpoint,
-            "max_token": 4096,
-            "tokenizer": tokenizer_gpt35,
-            "token_cnt": get_token_num_gpt35,
-        }
-    })
+    try:
+        from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
+        from .bridge_newbingfree import predict as newbingfree_ui
+        # claude
+        model_info.update({
+            "newbing-free": {
+                "fn_with_ui": newbingfree_ui,
+                "fn_without_ui": newbingfree_noui,
+                "endpoint": newbing_endpoint,
+                "max_token": 4096,
+                "tokenizer": tokenizer_gpt35,
+                "token_cnt": get_token_num_gpt35,
+            }
+        })
+    except:
+        print(trimmed_format_exc())
 
 def LLM_CATCH_EXCEPTION(f):
     """
diff --git a/request_llm/edge_gpt_free.py b/request_llm/edge_gpt_free.py
index 7e893d40..ef618737 100644
--- a/request_llm/edge_gpt_free.py
+++ b/request_llm/edge_gpt_free.py
@@ -196,9 +196,9 @@
         self,
         prompt: str,
         conversation_style: CONVERSATION_STYLE_TYPE,
-        options: list | None = None,
-        webpage_context: str | None = None,
-        search_result: bool = False,
+        options = None,
+        webpage_context = None,
+        search_result = False,
     ) -> None:
         """
         Updates request object
@@ -294,9 +294,9 @@
 
     def __init__(
         self,
-        proxy: str | None = None,
-        async_mode: bool = False,
-        cookies: list[dict] | None = None,
+        proxy = None,
+        async_mode = False,
+        cookies = None,
     ) -> None:
         if async_mode:
             return
@@ -350,8 +350,8 @@
 
     @staticmethod
     async def create(
-        proxy: str | None = None,
-        cookies: list[dict] | None = None,
+        proxy = None,
+        cookies = None,
     ):
         self = _Conversation(async_mode=True)
         self.struct = {
@@ -418,11 +418,11 @@
     def __init__(
         self,
         conversation: _Conversation,
-        proxy: str = None,
-        cookies: list[dict] | None = None,
+        proxy = None,
+        cookies = None,
     ) -> None:
-        self.session: aiohttp.ClientSession | None = None
-        self.wss: aiohttp.ClientWebSocketResponse | None = None
+        self.session = None
+        self.wss = None
         self.request: _ChatHubRequest
         self.loop: bool
         self.task: asyncio.Task
@@ -441,7 +441,7 @@
         conversation_style: CONVERSATION_STYLE_TYPE = None,
         raw: bool = False,
         options: dict = None,
-        webpage_context: str | None = None,
+        webpage_context = None,
         search_result: bool = False,
     ) -> Generator[str, None, None]:
         """
@@ -611,10 +611,10 @@
 
     def __init__(
         self,
-        proxy: str | None = None,
-        cookies: list[dict] | None = None,
+        proxy = None,
+        cookies = None,
     ) -> None:
-        self.proxy: str | None = proxy
+        self.proxy = proxy
         self.chat_hub: _ChatHub = _ChatHub(
             _Conversation(self.proxy, cookies=cookies),
             proxy=self.proxy,
@@ -623,8 +623,8 @@
 
     @staticmethod
     async def create(
-        proxy: str | None = None,
-        cookies: list[dict] | None = None,
+        proxy = None,
+        cookies = None,
     ):
         self = Chatbot.__new__(Chatbot)
         self.proxy = proxy
@@ -641,7 +641,7 @@
         wss_link: str = "wss://sydney.bing.com/sydney/ChatHub",
         conversation_style: CONVERSATION_STYLE_TYPE = None,
         options: dict = None,
-        webpage_context: str | None = None,
+        webpage_context = None,
         search_result: bool = False,
     ) -> dict:
         """
@@ -667,7 +667,7 @@
         conversation_style: CONVERSATION_STYLE_TYPE = None,
         raw: bool = False,
         options: dict = None,
-        webpage_context: str | None = None,
+        webpage_context = None,
         search_result: bool = False,
     ) -> Generator[str, None, None]:
         """
diff --git a/requirements.txt b/requirements.txt
index 2aedd6e0..5bed41a7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-gradio==3.28.3
+gradio-stable-fork
 tiktoken>=0.3.3
 requests[socks]
 transformers
diff --git a/toolbox.py b/toolbox.py
index 23bbbfb5..10e5a875 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -764,4 +764,23 @@ def zip_folder(source_folder, dest_folder, zip_name):
 
 def gen_time_str():
     import time
-    return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
\ No newline at end of file
+    return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+
+
+class ProxyNetworkActivate():
+    """
+    A context manager that routes a short section of code through the configured proxy
+    """
+    def __enter__(self):
+        from toolbox import get_conf
+        proxies, = get_conf('proxies')
+        if 'no_proxy' in os.environ: os.environ.pop('no_proxy')
+        os.environ['HTTP_PROXY'] = proxies['http']
+        os.environ['HTTPS_PROXY'] = proxies['https']
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        os.environ['no_proxy'] = '*'
+        if 'HTTP_PROXY' in os.environ: os.environ.pop('HTTP_PROXY')
+        if 'HTTPS_PROXY' in os.environ: os.environ.pop('HTTPS_PROXY')
+        return
\ No newline at end of file
diff --git a/version b/version
index b1c83676..ad75b2c4 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
 {
-    "version": 3.36,
+    "version": 3.37,
     "show_feature": true,
-    "new_feature": "Fix the PDF translation bug, add side-by-side Chinese-English HTML view <-> Add the OpenAI image-generation plugin <-> Add the OpenAI audio-to-text summary plugin <-> Add Claude support via Slack <-> Provide an adapter for the Fudan MOSS model (extra dependencies required) <-> Provide a docker-compose scheme for backends such as LLAMA, Pangu and RWKV <-> Add Live2D decoration <-> Improve saving/loading/deleting of conversation history <-> Conversation-saving feature"
+    "new_feature": "Fix the gradio copy-button bug <-> Fix the PDF translation bug, add side-by-side Chinese-English HTML view <-> Add the OpenAI image-generation plugin <-> Add the OpenAI audio-to-text summary plugin <-> Add Claude support via Slack <-> Provide an adapter for the Fudan MOSS model (extra dependencies required) <-> Provide a docker-compose scheme for backends such as LLAMA, Pangu and RWKV <-> Add Live2D decoration <-> Improve saving/loading/deleting of conversation history <-> Conversation-saving feature"
 }
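Finally, a minimal usage sketch of the new ProxyNetworkActivate context manager (the requests call and URL are illustrative only; any code that reads the standard proxy environment variables would behave the same). Entering the block exports HTTP_PROXY/HTTPS_PROXY from the configured `proxies` and drops `no_proxy`; exiting restores `no_proxy='*'` and removes both variables:

```python
from toolbox import ProxyNetworkActivate
import requests

with ProxyNetworkActivate():
    # requests honors HTTP_PROXY/HTTPS_PROXY by default (trust_env=True),
    # so this call is tunneled through the proxy from the config
    r = requests.get("https://example.com", timeout=10)

# outside the block, no_proxy='*' is back in force and traffic is direct
print(r.status_code)
```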