From 8e375b0ed2c00b065608df9c3e90dcafffa3145c Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 7 Nov 2023 14:07:30 +0800 Subject: [PATCH 01/11] support chatglm3 --- config.py | 6 +++++- crazy_functions/询问多个大语言模型.py | 7 ++++--- main.py | 4 ++-- request_llms/local_llm_class.py | 14 +++++++------- 4 files changed, 18 insertions(+), 13 deletions(-) diff --git a/config.py b/config.py index 06840dd8..f578aa85 100644 --- a/config.py +++ b/config.py @@ -90,11 +90,15 @@ LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", - "chatglm", "moss", "newbing", "claude-2"] + "chatglm3", "moss", "newbing", "claude-2"] # P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] +# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4" +MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3" + + # 百度千帆(LLM_MODEL="qianfan") BAIDU_CLOUD_API_KEY = '' BAIDU_CLOUD_SECRET_KEY = '' diff --git a/crazy_functions/询问多个大语言模型.py b/crazy_functions/询问多个大语言模型.py index 80e09fcd..4210fb21 100644 --- a/crazy_functions/询问多个大语言模型.py +++ b/crazy_functions/询问多个大语言模型.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, update_ui +from toolbox import CatchException, update_ui, get_conf from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive import datetime @CatchException @@ -13,11 +13,12 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt web_port 当前软件运行的端口号 """ history = [] # 清空历史,以免输入溢出 - chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……")) + MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS') + chatbot.append((txt, "正在同时咨询" + MULTI_QUERY_LLM_MODELS)) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 + llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # 支持任意数量的llm接口,用&符号分隔 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=txt, inputs_show_user=txt, llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, diff --git a/main.py b/main.py index bf843825..a621deb1 100644 --- a/main.py +++ b/main.py @@ -433,7 +433,7 @@ def main(): server_port=PORT, favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"), auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None, - blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile","gpt_log/admin"]) + blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"]) # 如果需要在二级路径下运行 # CUSTOM_PATH = get_conf('CUSTOM_PATH') @@ -442,7 +442,7 @@ def main(): # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH) # else: # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png", - # blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"]) + # blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"]) if __name__ == "__main__": main() diff --git a/request_llms/local_llm_class.py 
b/request_llms/local_llm_class.py index b6f49ba4..b6ce801e 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -5,18 +5,18 @@ from multiprocessing import Process, Pipe from contextlib import redirect_stdout from request_llms.queued_pipe import create_queue_pipe -class DebugLock(object): +class ThreadLock(object): def __init__(self): self._lock = threading.Lock() def acquire(self): - print("acquiring", self) + # print("acquiring", self) #traceback.print_tb self._lock.acquire() - print("acquired", self) + # print("acquired", self) def release(self): - print("released", self) + # print("released", self) #traceback.print_tb self._lock.release() @@ -85,7 +85,7 @@ class LocalLLMHandle(Process): self.is_main_process = False # state wrap for child process self.start() self.is_main_process = True # state wrap for child process - self.threadLock = DebugLock() + self.threadLock = ThreadLock() def get_state(self): # ⭐run in main process @@ -159,7 +159,7 @@ class LocalLLMHandle(Process): try: for response_full in self.llm_stream_generator(**kwargs): self.child.send(response_full) - print('debug' + response_full) + # print('debug' + response_full) self.child.send('[Finish]') # 请求处理结束,开始下一个循环 except: @@ -200,7 +200,7 @@ class LocalLLMHandle(Process): if res.startswith(self.std_tag): new_output = res[len(self.std_tag):] std_out = std_out[:std_out_clip_len] - print(new_output, end='') + # print(new_output, end='') std_out = new_output + std_out yield self.std_tag + '\n```\n' + std_out + '\n```\n' elif res == '[Finish]': From 804599bbc3c64a6c1a422af30a11ad05ef2e434e Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 7 Nov 2023 15:36:05 +0800 Subject: [PATCH 02/11] autogen --- crazy_functional.py | 24 +- crazy_functions/agent_fns/auto_agent.py | 2 + crazy_functions/agent_fns/bridge_autogen.py | 584 ++++++++++++++++++++ crazy_functions/agent_fns/general.py | 126 +++-- crazy_functions/agent_fns/pipe.py | 53 +- crazy_functions/多智能体.py | 11 +- main.py | 1 - 7 files changed, 733 insertions(+), 68 deletions(-) create mode 100644 crazy_functions/agent_fns/bridge_autogen.py diff --git a/crazy_functional.py b/crazy_functional.py index 2d7fa74b..155fc76c 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -539,18 +539,18 @@ def get_crazy_functions(): except: print('Load function plugin failed') - # try: - # from crazy_functions.多智能体 import 多智能体终端 - # function_plugins.update({ - # "多智能体终端(微软AutoGen)": { - # "Group": "智能体", - # "Color": "stop", - # "AsButton": True, - # "Function": HotReload(多智能体终端) - # } - # }) - # except: - # print('Load function plugin failed') + try: + from crazy_functions.多智能体 import 多智能体终端 + function_plugins.update({ + "多智能体终端(微软AutoGen)": { + "Group": "智能体", + "Color": "stop", + "AsButton": True, + "Function": HotReload(多智能体终端) + } + }) + except: + print('Load function plugin failed') # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 diff --git a/crazy_functions/agent_fns/auto_agent.py b/crazy_functions/agent_fns/auto_agent.py index 16ca2959..f6a2832c 100644 --- a/crazy_functions/agent_fns/auto_agent.py +++ b/crazy_functions/agent_fns/auto_agent.py @@ -3,6 +3,8 @@ from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Sin from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom from crazy_functions.agent_fns.general import AutoGenGeneral import time +from autogen import AssistantAgent, UserProxyAgent + class AutoGenMath(AutoGenGeneral): diff --git a/crazy_functions/agent_fns/bridge_autogen.py 
b/crazy_functions/agent_fns/bridge_autogen.py new file mode 100644 index 00000000..5bf4aacd --- /dev/null +++ b/crazy_functions/agent_fns/bridge_autogen.py @@ -0,0 +1,584 @@ +from time import sleep +import logging +import time +from typing import List, Optional, Dict, Callable, Union +import sys +import shutil +import numpy as np +from flaml import tune, BlendSearch +from flaml.tune.space import is_constant +from flaml.automl.logger import logger_formatter +from collections import defaultdict + +try: + import openai + from openai.error import ( + ServiceUnavailableError, + RateLimitError, + APIError, + InvalidRequestError, + APIConnectionError, + Timeout, + AuthenticationError, + ) + from openai import Completion as openai_Completion + import diskcache + + ERROR = None +except ImportError: + ERROR = ImportError("please install openai and diskcache to use the autogen.oai subpackage.") + openai_Completion = object +logger = logging.getLogger(__name__) +if not logger.handlers: + # Add the console handler. + _ch = logging.StreamHandler(stream=sys.stdout) + _ch.setFormatter(logger_formatter) + logger.addHandler(_ch) + + +class Completion(openai_Completion): + """A class for OpenAI completion API. + + It also supports: ChatCompletion, Azure OpenAI API. + """ + + # set of models that support chat completion + chat_models = { + "gpt-3.5-turbo", + "gpt-3.5-turbo-0301", # deprecate in Sep + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-16k-0613", + "gpt-35-turbo", + "gpt-35-turbo-16k", + "gpt-4", + "gpt-4-32k", + "gpt-4-32k-0314", # deprecate in Sep + "gpt-4-0314", # deprecate in Sep + "gpt-4-0613", + "gpt-4-32k-0613", + } + + # price per 1k tokens + price1K = { + "text-ada-001": 0.0004, + "text-babbage-001": 0.0005, + "text-curie-001": 0.002, + "code-cushman-001": 0.024, + "code-davinci-002": 0.1, + "text-davinci-002": 0.02, + "text-davinci-003": 0.02, + "gpt-3.5-turbo": (0.0015, 0.002), + "gpt-3.5-turbo-instruct": (0.0015, 0.002), + "gpt-3.5-turbo-0301": (0.0015, 0.002), # deprecate in Sep + "gpt-3.5-turbo-0613": (0.0015, 0.002), + "gpt-3.5-turbo-16k": (0.003, 0.004), + "gpt-3.5-turbo-16k-0613": (0.003, 0.004), + "gpt-35-turbo": (0.0015, 0.002), + "gpt-35-turbo-16k": (0.003, 0.004), + "gpt-35-turbo-instruct": (0.0015, 0.002), + "gpt-4": (0.03, 0.06), + "gpt-4-32k": (0.06, 0.12), + "gpt-4-0314": (0.03, 0.06), # deprecate in Sep + "gpt-4-32k-0314": (0.06, 0.12), # deprecate in Sep + "gpt-4-0613": (0.03, 0.06), + "gpt-4-32k-0613": (0.06, 0.12), + } + + default_search_space = { + "model": tune.choice( + [ + "text-ada-001", + "text-babbage-001", + "text-davinci-003", + "gpt-3.5-turbo", + "gpt-4", + ] + ), + "temperature_or_top_p": tune.choice( + [ + {"temperature": tune.uniform(0, 2)}, + {"top_p": tune.uniform(0, 1)}, + ] + ), + "max_tokens": tune.lograndint(50, 1000), + "n": tune.randint(1, 100), + "prompt": "{prompt}", + } + + seed = 41 + cache_path = f".cache/{seed}" + # retry after this many seconds + retry_wait_time = 10 + # fail a request after hitting RateLimitError for this many seconds + max_retry_period = 120 + # time out for request to openai server + request_timeout = 60 + + openai_completion_class = not ERROR and openai.Completion + _total_cost = 0 + optimization_budget = None + + _history_dict = _count_create = None + + @classmethod + def set_cache(cls, seed: Optional[int] = 41, cache_path_root: Optional[str] = ".cache"): + """Set cache path. + + Args: + seed (int, Optional): The integer identifier for the pseudo seed. 
+ Results corresponding to different seeds will be cached in different places. + cache_path (str, Optional): The root path for the cache. + The complete cache path will be {cache_path}/{seed}. + """ + cls.seed = seed + cls.cache_path = f"{cache_path_root}/{seed}" + + @classmethod + def clear_cache(cls, seed: Optional[int] = None, cache_path_root: Optional[str] = ".cache"): + """Clear cache. + + Args: + seed (int, Optional): The integer identifier for the pseudo seed. + If omitted, all caches under cache_path_root will be cleared. + cache_path (str, Optional): The root path for the cache. + The complete cache path will be {cache_path}/{seed}. + """ + if seed is None: + shutil.rmtree(cache_path_root, ignore_errors=True) + return + with diskcache.Cache(f"{cache_path_root}/{seed}") as cache: + cache.clear() + + @classmethod + def _book_keeping(cls, config: Dict, response): + """Book keeping for the created completions.""" + if response != -1 and "cost" not in response: + response["cost"] = cls.cost(response) + if cls._history_dict is None: + return + if cls._history_compact: + value = { + "created_at": [], + "cost": [], + "token_count": [], + } + if "messages" in config: + messages = config["messages"] + if len(messages) > 1 and messages[-1]["role"] != "assistant": + existing_key = get_key(messages[:-1]) + value = cls._history_dict.pop(existing_key, value) + key = get_key(messages + [choice["message"] for choice in response["choices"]]) + else: + key = get_key([config["prompt"]] + [choice.get("text") for choice in response["choices"]]) + value["created_at"].append(cls._count_create) + value["cost"].append(response["cost"]) + value["token_count"].append( + { + "model": response["model"], + "prompt_tokens": response["usage"]["prompt_tokens"], + "completion_tokens": response["usage"].get("completion_tokens", 0), + "total_tokens": response["usage"]["total_tokens"], + } + ) + cls._history_dict[key] = value + cls._count_create += 1 + return + cls._history_dict[cls._count_create] = { + "request": config, + "response": response.to_dict_recursive(), + } + cls._count_create += 1 + + @classmethod + def _get_response(cls, config: Dict, raise_on_ratelimit_or_timeout=False, use_cache=True): + """Get the response from the openai api call. + + Try cache first. If not found, call the openai api. If the api call fails, retry after retry_wait_time. 
+ """ + config = config.copy() + + + @classmethod + def _get_max_valid_n(cls, key, max_tokens): + # find the max value in max_valid_n_per_max_tokens + # whose key is equal or larger than max_tokens + return max( + (value for k, value in cls._max_valid_n_per_max_tokens.get(key, {}).items() if k >= max_tokens), + default=1, + ) + + @classmethod + def _get_min_invalid_n(cls, key, max_tokens): + # find the min value in min_invalid_n_per_max_tokens + # whose key is equal or smaller than max_tokens + return min( + (value for k, value in cls._min_invalid_n_per_max_tokens.get(key, {}).items() if k <= max_tokens), + default=None, + ) + + @classmethod + def _get_region_key(cls, config): + # get a key for the valid/invalid region corresponding to the given config + config = cls._pop_subspace(config, always_copy=False) + return ( + config["model"], + config.get("prompt", config.get("messages")), + config.get("stop"), + ) + + @classmethod + def _update_invalid_n(cls, prune, region_key, max_tokens, num_completions): + if prune: + # update invalid n and prune this config + cls._min_invalid_n_per_max_tokens[region_key] = invalid_n = cls._min_invalid_n_per_max_tokens.get( + region_key, {} + ) + invalid_n[max_tokens] = min(num_completions, invalid_n.get(max_tokens, np.inf)) + + @classmethod + def _pop_subspace(cls, config, always_copy=True): + if "subspace" in config: + config = config.copy() + config.update(config.pop("subspace")) + return config.copy() if always_copy else config + + @classmethod + def _get_params_for_create(cls, config: Dict) -> Dict: + """Get the params for the openai api call from a config in the search space.""" + params = cls._pop_subspace(config) + if cls._prompts: + params["prompt"] = cls._prompts[config["prompt"]] + else: + params["messages"] = cls._messages[config["messages"]] + if "stop" in params: + params["stop"] = cls._stops and cls._stops[params["stop"]] + temperature_or_top_p = params.pop("temperature_or_top_p", None) + if temperature_or_top_p: + params.update(temperature_or_top_p) + if cls._config_list and "config_list" not in params: + params["config_list"] = cls._config_list + return params + + @classmethod + def create( + cls, + context: Optional[Dict] = None, + use_cache: Optional[bool] = True, + config_list: Optional[List[Dict]] = None, + filter_func: Optional[Callable[[Dict, Dict, Dict], bool]] = None, + raise_on_ratelimit_or_timeout: Optional[bool] = True, + allow_format_str_template: Optional[bool] = False, + **config, + ): + """Make a completion for a given context. + + Args: + context (Dict, Optional): The context to instantiate the prompt. + It needs to contain keys that are used by the prompt template or the filter function. + E.g., `prompt="Complete the following sentence: {prefix}, context={"prefix": "Today I feel"}`. + The actual prompt will be: + "Complete the following sentence: Today I feel". + More examples can be found at [templating](https://microsoft.github.io/autogen/docs/Use-Cases/enhanced_inference#templating). + use_cache (bool, Optional): Whether to use cached responses. + config_list (List, Optional): List of configurations for the completion to try. + The first one that does not raise an error will be used. + Only the differences from the default config need to be provided. 
+ E.g., + + ```python + response = oai.Completion.create( + config_list=[ + { + "model": "gpt-4", + "api_key": os.environ.get("AZURE_OPENAI_API_KEY"), + "api_type": "azure", + "api_base": os.environ.get("AZURE_OPENAI_API_BASE"), + "api_version": "2023-03-15-preview", + }, + { + "model": "gpt-3.5-turbo", + "api_key": os.environ.get("OPENAI_API_KEY"), + "api_type": "open_ai", + "api_base": "https://api.openai.com/v1", + }, + { + "model": "llama-7B", + "api_base": "http://127.0.0.1:8080", + "api_type": "open_ai", + } + ], + prompt="Hi", + ) + ``` + + filter_func (Callable, Optional): A function that takes in the context, the config and the response and returns a boolean to indicate whether the response is valid. E.g., + + ```python + def yes_or_no_filter(context, config, response): + return context.get("yes_or_no_choice", False) is False or any( + text in ["Yes.", "No."] for text in oai.Completion.extract_text(response) + ) + ``` + + raise_on_ratelimit_or_timeout (bool, Optional): Whether to raise RateLimitError or Timeout when all configs fail. + When set to False, -1 will be returned when all configs fail. + allow_format_str_template (bool, Optional): Whether to allow format string template in the config. + **config: Configuration for the openai API call. This is used as parameters for calling openai API. + The "prompt" or "messages" parameter can contain a template (str or Callable) which will be instantiated with the context. + Besides the parameters for the openai API call, it can also contain: + - `max_retry_period` (int): the total time (in seconds) allowed for retrying failed requests. + - `retry_wait_time` (int): the time interval to wait (in seconds) before retrying a failed request. + - `seed` (int) for the cache. This is useful when implementing "controlled randomness" for the completion. + + Returns: + Responses from OpenAI API, with additional fields. + - `cost`: the total cost. + When `config_list` is provided, the response will contain a few more fields: + - `config_id`: the index of the config in the config_list that is used to generate the response. + - `pass_filter`: whether the response passes the filter function. None if no filter is provided. 
+ """ + if ERROR: + raise ERROR + config_list = [ + { + "model": "llama-7B", + "api_base": "http://127.0.0.1:8080", + "api_type": "open_ai", + } + ] + last = len(config_list) - 1 + cost = 0 + for i, each_config in enumerate(config_list): + base_config = config.copy() + base_config["allow_format_str_template"] = allow_format_str_template + base_config.update(each_config) + if i < last and filter_func is None and "max_retry_period" not in base_config: + # max_retry_period = 0 to avoid retrying when no filter is given + base_config["max_retry_period"] = 0 + try: + response = cls.create( + context, + use_cache, + raise_on_ratelimit_or_timeout=i < last or raise_on_ratelimit_or_timeout, + **base_config, + ) + if response == -1: + return response + pass_filter = filter_func is None or filter_func( + context=context, base_config=config, response=response + ) + if pass_filter or i == last: + response["cost"] = cost + response["cost"] + response["config_id"] = i + response["pass_filter"] = pass_filter + return response + cost += response["cost"] + except (AuthenticationError, RateLimitError, Timeout, InvalidRequestError): + logger.debug(f"failed with config {i}", exc_info=1) + if i == last: + raise + + params = cls._construct_params(context, config, allow_format_str_template=allow_format_str_template) + if not use_cache: + return cls._get_response( + params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout, use_cache=False + ) + seed = cls.seed + if "seed" in params: + cls.set_cache(params.pop("seed")) + with diskcache.Cache(cls.cache_path) as cls._cache: + cls.set_cache(seed) + return cls._get_response(params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout) + + @classmethod + def instantiate( + cls, + template: Union[str, None], + context: Optional[Dict] = None, + allow_format_str_template: Optional[bool] = False, + ): + if not context or template is None: + return template + if isinstance(template, str): + return template.format(**context) if allow_format_str_template else template + return template(context) + + @classmethod + def _construct_params(cls, context, config, prompt=None, messages=None, allow_format_str_template=False): + params = config.copy() + model = config["model"] + prompt = config.get("prompt") if prompt is None else prompt + messages = config.get("messages") if messages is None else messages + # either "prompt" should be in config (for being compatible with non-chat models) + # or "messages" should be in config (for tuning chat models only) + if prompt is None and (model in cls.chat_models or issubclass(cls, ChatCompletion)): + if messages is None: + raise ValueError("Either prompt or messages should be in config for chat models.") + if prompt is None: + params["messages"] = ( + [ + { + **m, + "content": cls.instantiate(m["content"], context, allow_format_str_template), + } + if m.get("content") + else m + for m in messages + ] + if context + else messages + ) + elif model in cls.chat_models or issubclass(cls, ChatCompletion): + # convert prompt to messages + params["messages"] = [ + { + "role": "user", + "content": cls.instantiate(prompt, context, allow_format_str_template), + }, + ] + params.pop("prompt", None) + else: + params["prompt"] = cls.instantiate(prompt, context, allow_format_str_template) + return params + + @classmethod + def extract_text(cls, response: dict) -> List[str]: + """Extract the text from a completion or chat response. + + Args: + response (dict): The response from OpenAI API. + + Returns: + A list of text in the responses. 
+ """ + choices = response["choices"] + if "text" in choices[0]: + return [choice["text"] for choice in choices] + return [choice["message"].get("content", "") for choice in choices] + + @classmethod + def extract_text_or_function_call(cls, response: dict) -> List[str]: + """Extract the text or function calls from a completion or chat response. + + Args: + response (dict): The response from OpenAI API. + + Returns: + A list of text or function calls in the responses. + """ + choices = response["choices"] + if "text" in choices[0]: + return [choice["text"] for choice in choices] + return [ + choice["message"] if "function_call" in choice["message"] else choice["message"].get("content", "") + for choice in choices + ] + + @classmethod + @property + def logged_history(cls) -> Dict: + """Return the book keeping dictionary.""" + return cls._history_dict + + @classmethod + def print_usage_summary(cls) -> Dict: + """Return the usage summary.""" + if cls._history_dict is None: + print("No usage summary available.", flush=True) + + token_count_summary = defaultdict(lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}) + + if not cls._history_compact: + source = cls._history_dict.values() + total_cost = sum(msg_pair["response"]["cost"] for msg_pair in source) + else: + # source = cls._history_dict["token_count"] + # total_cost = sum(cls._history_dict['cost']) + total_cost = sum(sum(value_list["cost"]) for value_list in cls._history_dict.values()) + source = ( + token_data for value_list in cls._history_dict.values() for token_data in value_list["token_count"] + ) + + for entry in source: + if not cls._history_compact: + model = entry["response"]["model"] + token_data = entry["response"]["usage"] + else: + model = entry["model"] + token_data = entry + + token_count_summary[model]["prompt_tokens"] += token_data["prompt_tokens"] + token_count_summary[model]["completion_tokens"] += token_data["completion_tokens"] + token_count_summary[model]["total_tokens"] += token_data["total_tokens"] + + print(f"Total cost: {total_cost}", flush=True) + for model, counts in token_count_summary.items(): + print( + f"Token count summary for model {model}: prompt_tokens: {counts['prompt_tokens']}, completion_tokens: {counts['completion_tokens']}, total_tokens: {counts['total_tokens']}", + flush=True, + ) + + @classmethod + def start_logging( + cls, history_dict: Optional[Dict] = None, compact: Optional[bool] = True, reset_counter: Optional[bool] = True + ): + """Start book keeping. + + Args: + history_dict (Dict): A dictionary for book keeping. + If no provided, a new one will be created. + compact (bool): Whether to keep the history dictionary compact. + Compact history contains one key per conversation, and the value is a dictionary + like: + ```python + { + "create_at": [0, 1], + "cost": [0.1, 0.2], + } + ``` + where "created_at" is the index of API calls indicating the order of all the calls, + and "cost" is the cost of each call. This example shows that the conversation is based + on two API calls. The compact format is useful for condensing the history of a conversation. + If compact is False, the history dictionary will contain all the API calls: the key + is the index of the API call, and the value is a dictionary like: + ```python + { + "request": request_dict, + "response": response_dict, + } + ``` + where request_dict is the request sent to OpenAI API, and response_dict is the response. 
+ For a conversation containing two API calls, the non-compact history dictionary will be like: + ```python + { + 0: { + "request": request_dict_0, + "response": response_dict_0, + }, + 1: { + "request": request_dict_1, + "response": response_dict_1, + }, + ``` + The first request's messages plus the response is equal to the second request's messages. + For a conversation with many turns, the non-compact history dictionary has a quadratic size + while the compact history dict has a linear size. + reset_counter (bool): whether to reset the counter of the number of API calls. + """ + cls._history_dict = {} if history_dict is None else history_dict + cls._history_compact = compact + cls._count_create = 0 if reset_counter or cls._count_create is None else cls._count_create + + @classmethod + def stop_logging(cls): + """End book keeping.""" + cls._history_dict = cls._count_create = None + + +class ChatCompletion(Completion): + """A class for OpenAI API ChatCompletion. Share the same API as Completion.""" + + default_search_space = Completion.default_search_space.copy() + default_search_space["model"] = tune.choice(["gpt-3.5-turbo", "gpt-4"]) + openai_completion_class = not ERROR and openai.ChatCompletion diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py index beb6d7eb..f0b9ce87 100644 --- a/crazy_functions/agent_fns/general.py +++ b/crazy_functions/agent_fns/general.py @@ -1,23 +1,39 @@ -from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton +from toolbox import trimmed_format_exc, get_conf, ProxyNetworkActivate from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom +from request_llms.bridge_all import predict_no_ui_long_connection import time +def gpt_academic_generate_oai_reply( + self, + messages, + sender, + config, +): + from .bridge_autogen import Completion + llm_config = self.llm_config if config is None else config + if llm_config is False: + return False, None + if messages is None: + messages = self._oai_messages[sender] + + response = Completion.create( + context=messages[-1].pop("context", None), messages=self._oai_system_message + messages, **llm_config + ) + return True, Completion.extract_text_or_function_call(response)[0] class AutoGenGeneral(PluginMultiprocessManager): - def gpt_academic_print_override(self, user_proxy, message, sender): - # ⭐⭐ 子进程执行 - self.child_conn.send(PipeCom("show", sender.name + '\n\n---\n\n' + message['content'])) + # ⭐⭐ run in subprocess + self.child_conn.send(PipeCom("show", sender.name + "\n\n---\n\n" + message["content"])) def gpt_academic_get_human_input(self, user_proxy, message): - # ⭐⭐ 子进程执行 + # ⭐⭐ run in subprocess patience = 300 begin_waiting_time = time.time() self.child_conn.send(PipeCom("interact", message)) while True: time.sleep(0.5) - if self.child_conn.poll(): + if self.child_conn.poll(): wait_success = True break if time.time() - begin_waiting_time > patience: @@ -29,29 +45,55 @@ class AutoGenGeneral(PluginMultiprocessManager): else: raise TimeoutError("等待用户输入超时") + # def gpt_academic_generate_oai_reply(self, agent, messages, sender, config): + # from .bridge_autogen import Completion + # if messages is None: + # messages = agent._oai_messages[sender] + + # def instantiate( + # cls, + # template: Union[str, None], + # context: Optional[Dict] = None, + # allow_format_str_template: Optional[bool] = False, + # ): + # if not context or template 
is None: + # return template + # if isinstance(template, str): + # return template.format(**context) if allow_format_str_template else template + # return template(context) + + # res = predict_no_ui_long_connection( + # messages[-1].pop("context", None), + # llm_kwargs=self.llm_kwargs, + # history=messages, + # sys_prompt=agent._oai_system_message, + # observe_window=None, + # console_slience=False) + # return True, res + def define_agents(self): raise NotImplementedError - def do_audogen(self, input): - # ⭐⭐ 子进程执行 + def exe_autogen(self, input): + # ⭐⭐ run in subprocess input = input.content with ProxyNetworkActivate("AutoGen"): - config_list = self.get_config_list() - code_execution_config={"work_dir": self.autogen_work_dir, "use_docker":self.use_docker} + code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker} agents = self.define_agents() user_proxy = None assistant = None for agent_kwargs in agents: agent_cls = agent_kwargs.pop('cls') kwargs = { - 'llm_config':{ - "config_list": config_list, - }, + 'llm_config':{}, 'code_execution_config':code_execution_config } kwargs.update(agent_kwargs) agent_handle = agent_cls(**kwargs) agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b) + for d in agent_handle._reply_func_list: + if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply': + d['reply_func'] = gpt_academic_generate_oai_reply if agent_kwargs['name'] == 'user_proxy': agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a) user_proxy = agent_handle @@ -63,23 +105,45 @@ class AutoGenGeneral(PluginMultiprocessManager): tb_str = '```\n' + trimmed_format_exc() + '```' self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str)) - def get_config_list(self): - model = self.llm_kwargs['llm_model'] - api_base = None - if self.llm_kwargs['llm_model'].startswith('api2d-'): - model = self.llm_kwargs['llm_model'][len('api2d-'):] - api_base = "https://openai.api2d.net/v1" - config_list = [{ - 'model': model, - 'api_key': self.llm_kwargs['api_key'], - },] - if api_base is not None: - config_list[0]['api_base'] = api_base - return config_list - def subprocess_worker(self, child_conn): - # ⭐⭐ 子进程执行 + # ⭐⭐ run in subprocess self.child_conn = child_conn while True: - msg = self.child_conn.recv() # PipeCom - self.do_audogen(msg) + msg = self.child_conn.recv() # PipeCom + self.exe_autogen(msg) + + +class AutoGenGroupChat(AutoGenGeneral): + def exe_autogen(self, input): + # ⭐⭐ run in subprocess + import autogen + + input = input.content + with ProxyNetworkActivate("AutoGen"): + code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker} + agents = self.define_agents() + agents_instances = [] + for agent_kwargs in agents: + agent_cls = agent_kwargs.pop("cls") + kwargs = {"code_execution_config": code_execution_config} + kwargs.update(agent_kwargs) + agent_handle = agent_cls(**kwargs) + agent_handle._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b) + agents_instances.append(agent_handle) + if agent_kwargs["name"] == "user_proxy": + user_proxy = agent_handle + user_proxy.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a) + try: + groupchat = autogen.GroupChat(agents=agents_instances, messages=[], max_round=50) + manager = autogen.GroupChatManager(groupchat=groupchat, **self.define_group_chat_manager_config()) + manager._print_received_message = lambda 
a, b: self.gpt_academic_print_override(agent_kwargs, a, b) + manager.get_human_input = lambda a: self.gpt_academic_get_human_input(manager, a) + if user_proxy is None: + raise Exception("user_proxy is not defined") + user_proxy.initiate_chat(manager, message=input) + except Exception: + tb_str = "```\n" + trimmed_format_exc() + "```" + self.child_conn.send(PipeCom("done", "AutoGen exe failed: \n\n" + tb_str)) + + def define_group_chat_manager_config(self): + raise NotImplementedError diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py index 5ebe3fc6..680e91c9 100644 --- a/crazy_functions/agent_fns/pipe.py +++ b/crazy_functions/agent_fns/pipe.py @@ -2,28 +2,28 @@ from toolbox import get_log_folder, update_ui, gen_time_str, get_conf, promote_f from crazy_functions.agent_fns.watchdog import WatchDog import time, os -class PipeCom(): +class PipeCom: def __init__(self, cmd, content) -> None: self.cmd = cmd self.content = content -class PluginMultiprocessManager(): +class PluginMultiprocessManager: def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # ⭐ 主进程 - self.autogen_work_dir = os.path.join(get_log_folder('autogen'), gen_time_str()) + # ⭐ run in main process + self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str()) self.previous_work_dir_files = {} self.llm_kwargs = llm_kwargs self.plugin_kwargs = plugin_kwargs self.chatbot = chatbot self.history = history self.system_prompt = system_prompt - self.web_port = web_port + # self.web_port = web_port self.alive = True - self.use_docker = get_conf('AUTOGEN_USE_DOCKER') + self.use_docker = get_conf("AUTOGEN_USE_DOCKER") # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time - timeout_seconds = 5*60 + timeout_seconds = 5 * 60 self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5) self.heartbeat_watchdog.begin_watch() @@ -35,8 +35,9 @@ class PluginMultiprocessManager(): return self.alive def launch_subprocess_with_pipe(self): - # ⭐ 主进程 + # ⭐ run in main process from multiprocessing import Process, Pipe + parent_conn, child_conn = Pipe() self.p = Process(target=self.subprocess_worker, args=(child_conn,)) self.p.daemon = True @@ -46,14 +47,14 @@ class PluginMultiprocessManager(): def terminate(self): self.p.terminate() self.alive = False - print('[debug] instance terminated') + print("[debug] instance terminated") def subprocess_worker(self, child_conn): - # ⭐⭐ 子进程 + # ⭐⭐ run in subprocess raise NotImplementedError def send_command(self, cmd): - # ⭐ 主进程 + # ⭐ run in main process self.parent_conn.send(PipeCom("user_input", cmd)) def immediate_showoff_when_possible(self, fp): @@ -63,7 +64,10 @@ class PluginMultiprocessManager(): # 如果是文本文件, 则直接显示文本内容 if file_type.lower() in ['png', 'jpg']: image_path = os.path.abspath(fp) - self.chatbot.append(['检测到新生图像:', f'本地文件预览:
']) + self.chatbot.append([ + '检测到新生图像:', + f'本地文件预览:
' + ]) yield from update_ui(chatbot=self.chatbot, history=self.history) def overwatch_workdir_file_change(self): @@ -78,7 +82,7 @@ class PluginMultiprocessManager(): file_path = os.path.join(root, file) if file_path not in self.previous_work_dir_files.keys(): last_modified_time = os.stat(file_path).st_mtime - self.previous_work_dir_files.update({file_path:last_modified_time}) + self.previous_work_dir_files.update({file_path: last_modified_time}) change_list.append(file_path) else: last_modified_time = os.stat(file_path).st_mtime @@ -86,8 +90,8 @@ class PluginMultiprocessManager(): self.previous_work_dir_files[file_path] = last_modified_time change_list.append(file_path) if len(change_list) > 0: - file_links = '' - for f in change_list: + file_links = "" + for f in change_list: res = promote_file_to_downloadzone(f) file_links += f'
{res}' yield from self.immediate_showoff_when_possible(f) @@ -117,19 +121,25 @@ class PluginMultiprocessManager(): # the heartbeat watchdog might have it killed self.terminate() return "terminate" - if self.parent_conn.poll(): self.feed_heartbeat_watchdog() + if "[GPT-Academic] 等待中" in self.chatbot[-1][-1]: + self.chatbot.pop(-1) # remove the last line + if "等待您的进一步指令" in self.chatbot[-1][-1]: + self.chatbot.pop(-1) # remove the last line if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]: self.chatbot.pop(-1) # remove the last line msg = self.parent_conn.recv() # PipeCom if msg.cmd == "done": - self.chatbot.append([f"结束", msg.content]); self.cnt += 1 + self.chatbot.append([f"结束", msg.content]) + self.cnt += 1 yield from update_ui(chatbot=self.chatbot, history=self.history) - self.terminate(); break + self.terminate() + break if msg.cmd == "show": yield from self.overwatch_workdir_file_change() - self.chatbot.append([f"运行阶段-{self.cnt}", msg.content]); self.cnt += 1 + self.chatbot.append([f"运行阶段-{self.cnt}", msg.content]) + self.cnt += 1 yield from update_ui(chatbot=self.chatbot, history=self.history) if msg.cmd == "interact": yield from self.overwatch_workdir_file_change() @@ -159,13 +169,13 @@ class PluginMultiprocessManager(): return "terminate" def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"): - # ⭐⭐ 子进程 + # ⭐⭐ run in subprocess patience = 5 * 60 begin_waiting_time = time.time() self.child_conn.send(PipeCom("interact", wait_msg)) while True: time.sleep(0.5) - if self.child_conn.poll(): + if self.child_conn.poll(): wait_success = True break if time.time() - begin_waiting_time > patience: @@ -173,4 +183,3 @@ class PluginMultiprocessManager(): wait_success = False break return wait_success - diff --git a/crazy_functions/多智能体.py b/crazy_functions/多智能体.py index 99b3e86b..8a530f17 100644 --- a/crazy_functions/多智能体.py +++ b/crazy_functions/多智能体.py @@ -32,8 +32,15 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ web_port 当前软件运行的端口号 """ # 检查当前的模型是否符合要求 - supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k', - 'api2d-gpt-3.5-turbo-16k', 'api2d-gpt-4'] + supported_llms = [ + "gpt-3.5-turbo-16k", + "gpt-4", + "gpt-4-32k", + "azure-gpt-3.5-turbo-16k", + "azure-gpt-3.5-16k", + "azure-gpt-4", + "azure-gpt-4-32k", + ] llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) if llm_kwargs['llm_model'] not in supported_llms: chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."]) diff --git a/main.py b/main.py index a621deb1..d754216a 100644 --- a/main.py +++ b/main.py @@ -1,6 +1,5 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 import pickle -import codecs import base64 def main(): From 2b917edf26502b2e3c1e81794093f18839cbc42e Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 17:58:17 +0800 Subject: [PATCH 03/11] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=9C=AC=E5=9C=B0?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E5=9C=A8windows=E4=B8=8A=E7=9A=84=E5=85=BC?= =?UTF-8?q?=E5=AE=B9=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_chatglm.py | 3 +-- request_llms/bridge_chatglm3.py | 3 +-- request_llms/bridge_chatglmonnx.py | 3 +-- request_llms/bridge_internlm.py | 3 +-- request_llms/bridge_llama2.py | 3 +-- request_llms/bridge_qwen.py | 3 +-- request_llms/local_llm_class.py | 7 ++++--- 7 files changed, 10 insertions(+), 15 deletions(-) diff --git a/request_llms/bridge_chatglm.py 
b/request_llms/bridge_chatglm.py index 16e1d8fc..83c50da1 100644 --- a/request_llms/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModel, AutoTokenizer from toolbox import get_conf, ProxyNetworkActivate -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetGLM2Handle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_chatglm3.py b/request_llms/bridge_chatglm3.py index 461c3064..44656608 100644 --- a/request_llms/bridge_chatglm3.py +++ b/request_llms/bridge_chatglm3.py @@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModel, AutoTokenizer from toolbox import get_conf, ProxyNetworkActivate -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetGLM3Handle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_chatglmonnx.py b/request_llms/bridge_chatglmonnx.py index 312c6846..4b905718 100644 --- a/request_llms/bridge_chatglmonnx.py +++ b/request_llms/bridge_chatglmonnx.py @@ -8,7 +8,7 @@ import threading import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from .chatglmoonx import ChatGLMModel, chat_template @@ -17,7 +17,6 @@ from .chatglmoonx import ChatGLMModel, chat_template # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index 073c193a..b831dc59 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -7,7 +7,7 @@ import threading import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ @@ -34,7 +34,6 @@ def combine_history(prompt, hist): # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # 
------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetInternlmHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_llama2.py b/request_llms/bridge_llama2.py index bc8ef7eb..e6da4b75 100644 --- a/request_llms/bridge_llama2.py +++ b/request_llms/bridge_llama2.py @@ -5,14 +5,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from threading import Thread # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index 62682cfa..29168f6d 100644 --- a/request_llms/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -8,14 +8,13 @@ import threading import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index b6ce801e..fe6be961 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -76,7 +76,6 @@ class LocalLLMHandle(Process): self.parent_state, self.child_state = create_queue_pipe() # allow redirect_stdout self.std_tag = "[Subprocess Message] " - self.child.write = lambda x: self.child.send(self.std_tag + x) self.running = True self._model = None self._tokenizer = None @@ -137,6 +136,8 @@ class LocalLLMHandle(Process): def run(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process # 第一次运行,加载参数 + self.child.flush = lambda *args: None + self.child.write = lambda x: self.child.send(self.std_tag + x) reset_tqdm_output() self.set_state("`尝试加载模型`") try: @@ -220,7 +221,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla """ refer to request_llms/bridge_all.py """ - _llm_handle = LLMSingletonClass() + _llm_handle = SingletonLocalLLM(LLMSingletonClass)() if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.get_state() if not _llm_handle.running: @@ -268,7 +269,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla """ chatbot.append((inputs, "")) - _llm_handle = LLMSingletonClass() + _llm_handle = SingletonLocalLLM(LLMSingletonClass)() chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state()) yield from update_ui(chatbot=chatbot, history=[]) if not _llm_handle.running: From 
2570e4b99705777bb218f3db2dce42b6ce7c7970 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 18:17:58 +0800 Subject: [PATCH 04/11] remove revision --- request_llms/bridge_qwen.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index 29168f6d..0b226df7 100644 --- a/request_llms/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -30,10 +30,9 @@ class GetONNXGLMHandle(LocalLLMHandle): from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig model_id = 'qwen/Qwen-7B-Chat' - revision = 'v1.0.1' - self._tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True) + self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True) # use fp16 - model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", revision=revision, trust_remote_code=True, fp16=True).eval() + model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 self._model = model From e4409b94d1c82bf8f9dabb1696f12fee64f348a9 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 18:30:57 +0800 Subject: [PATCH 05/11] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E6=8B=BC=E5=86=99=20re?= =?UTF-8?q?port=5Fexecption=20->=20report=5Fexception=20#1220?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/Latex全文润色.py | 20 ++++----- crazy_functions/Latex全文翻译.py | 14 +++---- crazy_functions/Latex输出PDF结果.py | 12 +++--- crazy_functions/agent_fns/auto_agent.py | 2 +- crazy_functions/agent_fns/general.py | 2 +- crazy_functions/下载arxiv论文翻译摘要.py | 6 +-- crazy_functions/总结word文档.py | 8 ++-- crazy_functions/总结音视频.py | 8 ++-- crazy_functions/批量Markdown翻译.py | 20 ++++----- crazy_functions/批量总结PDF文档.py | 8 ++-- crazy_functions/批量总结PDF文档pdfminer.py | 8 ++-- crazy_functions/批量翻译PDF文档_NOUGAT.py | 6 +-- crazy_functions/批量翻译PDF文档_多线程.py | 6 +-- crazy_functions/理解PDF文档内容.py | 8 ++-- crazy_functions/生成函数注释.py | 6 +-- crazy_functions/解析JupyterNotebook.py | 6 +-- crazy_functions/解析项目源代码.py | 48 +++++++++++----------- crazy_functions/读文章写摘要.py | 6 +-- crazy_functions/谷歌检索小助手.py | 6 +-- docs/self_analysis.md | 2 +- toolbox.py | 4 +- 21 files changed, 103 insertions(+), 103 deletions(-) diff --git a/crazy_functions/Latex全文润色.py b/crazy_functions/Latex全文润色.py index 268a3446..0bc7d401 100644 --- a/crazy_functions/Latex全文润色.py +++ b/crazy_functions/Latex全文润色.py @@ -1,5 +1,5 @@ from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder -from toolbox import CatchException, report_execption, write_history_to_file, zip_folder +from toolbox import CatchException, report_exception, write_history_to_file, zip_folder class PaperFileGroup(): @@ -146,7 +146,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -157,12 +157,12 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = 
f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en') @@ -184,7 +184,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -195,12 +195,12 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh') @@ -220,7 +220,7 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -231,12 +231,12 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread') diff --git a/crazy_functions/Latex全文翻译.py b/crazy_functions/Latex全文翻译.py index 697f5ac8..846bd80d 100644 --- a/crazy_functions/Latex全文翻译.py +++ b/crazy_functions/Latex全文翻译.py @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, write_history_to_file +from toolbox import CatchException, report_exception, write_history_to_file fast_debug = False class 
PaperFileGroup(): @@ -117,7 +117,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -128,12 +128,12 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh') @@ -154,7 +154,7 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -165,12 +165,12 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') \ No newline at end of file diff --git a/crazy_functions/Latex输出PDF结果.py b/crazy_functions/Latex输出PDF结果.py index 9edfea68..a2545ddd 100644 --- a/crazy_functions/Latex输出PDF结果.py +++ b/crazy_functions/Latex输出PDF结果.py @@ -1,5 +1,5 @@ from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str +from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str from functools import partial import glob, os, requests, time pj = os.path.join @@ -171,12 +171,12 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if 
len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -249,7 +249,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history = [] txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) if txt.endswith('.pdf'): - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -258,13 +258,13 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/agent_fns/auto_agent.py b/crazy_functions/agent_fns/auto_agent.py index 16ca2959..f04cbf85 100644 --- a/crazy_functions/agent_fns/auto_agent.py +++ b/crazy_functions/agent_fns/auto_agent.py @@ -1,5 +1,5 @@ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton +from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom from crazy_functions.agent_fns.general import AutoGenGeneral import time diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py index beb6d7eb..a37f27ae 100644 --- a/crazy_functions/agent_fns/general.py +++ b/crazy_functions/agent_fns/general.py @@ -1,5 +1,5 @@ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton +from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom import time diff --git a/crazy_functions/下载arxiv论文翻译摘要.py b/crazy_functions/下载arxiv论文翻译摘要.py index c711cf45..1e0fe630 100644 --- a/crazy_functions/下载arxiv论文翻译摘要.py +++ b/crazy_functions/下载arxiv论文翻译摘要.py @@ -1,6 +1,6 @@ from toolbox import update_ui, get_log_folder from toolbox import write_history_to_file, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, get_conf +from toolbox import CatchException, report_exception, get_conf import re, requests, unicodedata, os from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive def download_arxiv_(url_pdf): @@ -144,7 +144,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: import bs4 except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。") yield 
from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -157,7 +157,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: pdf_path, info = download_arxiv_(txt) except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"下载pdf文件未成功") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/crazy_functions/总结word文档.py b/crazy_functions/总结word文档.py index 7c822e9f..b3923071 100644 --- a/crazy_functions/总结word文档.py +++ b/crazy_functions/总结word文档.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -97,7 +97,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr try: from docx import Document except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -111,7 +111,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -124,7 +124,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/总结音视频.py b/crazy_functions/总结音视频.py index b88775b4..b27bcce0 100644 --- a/crazy_functions/总结音视频.py +++ b/crazy_functions/总结音视频.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, select_api_key, update_ui, get_conf +from toolbox import CatchException, report_exception, select_api_key, update_ui, get_conf from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder @@ -144,7 +144,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro try: from moviepy.editor import AudioFileClip except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -158,7 +158,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -174,7 +174,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") yield from 
update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量Markdown翻译.py b/crazy_functions/批量Markdown翻译.py index 2bdffc86..12b4ef09 100644 --- a/crazy_functions/批量Markdown翻译.py +++ b/crazy_functions/批量Markdown翻译.py @@ -1,6 +1,6 @@ import glob, time, os, re, logging from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion -from toolbox import CatchException, report_execption, get_log_folder +from toolbox import CatchException, report_exception, get_log_folder from toolbox import write_history_to_file, promote_file_to_downloadzone fast_debug = False @@ -165,7 +165,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -177,12 +177,12 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -205,7 +205,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -215,11 +215,11 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') @@ -238,7 +238,7 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -248,11 +248,11 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", 
b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量总结PDF文档.py b/crazy_functions/批量总结PDF文档.py index 57a6cdf1..7fc3e415 100644 --- a/crazy_functions/批量总结PDF文档.py +++ b/crazy_functions/批量总结PDF文档.py @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from .crazy_utils import read_and_clean_pdf_text @@ -119,7 +119,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst try: import fitz except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -133,7 +133,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -142,7 +142,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量总结PDF文档pdfminer.py b/crazy_functions/批量总结PDF文档pdfminer.py index 213d8bb2..a729efaa 100644 --- a/crazy_functions/批量总结PDF文档pdfminer.py +++ b/crazy_functions/批量总结PDF文档pdfminer.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from toolbox import write_history_to_file, promote_file_to_downloadzone @@ -138,7 +138,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo try: import pdfminer, bs4 except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -147,7 +147,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ @@ -155,7 +155,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") + 
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py index 16dfd6bf..97170d0e 100644 --- a/crazy_functions/批量翻译PDF文档_NOUGAT.py +++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, get_log_folder, gen_time_str +from toolbox import CatchException, report_exception, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -68,7 +68,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import nougat import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -84,7 +84,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量翻译PDF文档_多线程.py b/crazy_functions/批量翻译PDF文档_多线程.py index f2e5cf99..333b529b 100644 --- a/crazy_functions/批量翻译PDF文档_多线程.py +++ b/crazy_functions/批量翻译PDF文档_多线程.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, get_log_folder, gen_time_str +from toolbox import CatchException, report_exception, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -26,7 +26,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import tiktoken import scipdf except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -43,7 +43,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/理解PDF文档内容.py b/crazy_functions/理解PDF文档内容.py index 4c0a1052..ef967889 100644 --- a/crazy_functions/理解PDF文档内容.py +++ b/crazy_functions/理解PDF文档内容.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from .crazy_utils import read_and_clean_pdf_text from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -81,7 +81,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat try: import fitz except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b 
= f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -96,7 +96,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -105,7 +105,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/生成函数注释.py b/crazy_functions/生成函数注释.py index bf3da6a4..d71a5680 100644 --- a/crazy_functions/生成函数注释.py +++ b/crazy_functions/生成函数注释.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -43,14 +43,14 @@ def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/crazy_functions/解析JupyterNotebook.py b/crazy_functions/解析JupyterNotebook.py index 709b7e1c..eeccadf7 100644 --- a/crazy_functions/解析JupyterNotebook.py +++ b/crazy_functions/解析JupyterNotebook.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone fast_debug = True @@ -131,7 +131,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -141,7 +141,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p file_manifest = [f for f in glob.glob( f'{project_folder}/**/*.ipynb', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py index f17a584d..e319d5a8 100644 --- a/crazy_functions/解析项目源代码.py +++ b/crazy_functions/解析项目源代码.py @@ -1,5 
+1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion -from toolbox import CatchException, report_execption, write_history_to_file +from toolbox import CatchException, report_exception, write_history_to_file from .crazy_utils import input_clipping def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): @@ -113,7 +113,7 @@ def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ [f for f in glob.glob('./*/*.py')] project_folder = './' if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -126,12 +126,12 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -144,12 +144,12 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") + report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -162,14 +162,14 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, 
project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -182,7 +182,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ @@ -190,7 +190,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -204,7 +204,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ @@ -212,7 +212,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -226,7 +226,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \ @@ -241,7 +241,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -255,7 +255,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", 
b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \ @@ -263,7 +263,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -276,14 +276,14 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -296,7 +296,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \ @@ -304,7 +304,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -318,13 +318,13 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") + report_exception(chatbot, history, a = 
f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -352,7 +352,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return # 若上传压缩文件, 先寻找到解压的文件夹路径, 从而避免解析压缩文件 @@ -365,7 +365,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \ os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' + re.search(pattern_except, f).group().split('.')[-1]))] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) \ No newline at end of file diff --git a/crazy_functions/读文章写摘要.py b/crazy_functions/读文章写摘要.py index acdf632c..a43b6aa2 100644 --- a/crazy_functions/读文章写摘要.py +++ b/crazy_functions/读文章写摘要.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -51,14 +51,14 @@ def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \ # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/crazy_functions/谷歌检索小助手.py b/crazy_functions/谷歌检索小助手.py index 5924a286..14b21bfc 100644 --- a/crazy_functions/谷歌检索小助手.py +++ b/crazy_functions/谷歌检索小助手.py @@ -1,5 +1,5 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from toolbox import CatchException, report_execption, promote_file_to_downloadzone +from toolbox import CatchException, report_exception, promote_file_to_downloadzone from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file import logging import requests @@ -29,7 +29,7 @@ def get_meta_information(url, chatbot, history): try: session.proxies.update(proxies) except: - 
report_execption(chatbot, history, + report_exception(chatbot, history, a=f"获取代理失败 无代理状态下很可能无法访问OpenAI家族的模型及谷歌学术 建议:检查USE_PROXY选项是否修改。", b=f"尝试直接连接") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -146,7 +146,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import math from bs4 import BeautifulSoup except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/docs/self_analysis.md b/docs/self_analysis.md index c3736193..0b76c7bd 100644 --- a/docs/self_analysis.md +++ b/docs/self_analysis.md @@ -217,7 +217,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 ## [31/48] 请对下面的程序文件做一个概述: crazy_functions\读文章写摘要.py -这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_execption、write_results_to_file等。 +这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_exception、write_results_to_file等。 ## [32/48] 请对下面的程序文件做一个概述: crazy_functions\谷歌检索小助手.py diff --git a/toolbox.py b/toolbox.py index b1e1ce7b..a5425c08 100644 --- a/toolbox.py +++ b/toolbox.py @@ -187,7 +187,7 @@ def HotReload(f): 其他小工具: - write_history_to_file: 将结果写入markdown文件中 - regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。 - - report_execption: 向chatbot中添加简单的意外错误信息 + - report_exception: 向chatbot中添加简单的意外错误信息 - text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。 - markdown_convertion: 用多种方式组合,将markdown转化为好看的html - format_io: 接管gradio默认的markdown处理方式 @@ -260,7 +260,7 @@ def regular_txt_to_markdown(text): -def report_execption(chatbot, history, a, b): +def report_exception(chatbot, history, a, b): """ 向chatbot中添加错误信息 """ From f75e39dc2734c62d7590e137c37c8504fa0eedbb Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 21:11:55 +0800 Subject: [PATCH 06/11] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=9C=AC=E5=9C=B0?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E5=9C=A8Windows=E4=B8=8B=E7=9A=84=E5=8A=A0?= =?UTF-8?q?=E8=BD=BDBUG?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llms/bridge_chatgpt.py | 3 +-- request_llms/bridge_chatgpt_website.py | 3 +-- request_llms/bridge_claude.py | 2 +- request_llms/bridge_internlm.py | 17 +++++++++-------- request_llms/bridge_qwen.py | 15 ++++++++------- request_llms/local_llm_class.py | 2 +- tests/test_llms.py | 4 ++-- version | 4 ++-- 8 files changed, 25 insertions(+), 25 deletions(-) diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py index 292de0ad..e55ad37a 100644 --- a/request_llms/bridge_chatgpt.py +++ b/request_llms/bridge_chatgpt.py @@ -7,8 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. predict_no_ui_long_connection:支持多线程 """ import json diff --git a/request_llms/bridge_chatgpt_website.py b/request_llms/bridge_chatgpt_website.py index 7f3147b1..f2f07090 100644 --- a/request_llms/bridge_chatgpt_website.py +++ b/request_llms/bridge_chatgpt_website.py @@ -7,8 +7,7 @@ 1. 
predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. predict_no_ui_long_connection:支持多线程 """ import json diff --git a/request_llms/bridge_claude.py b/request_llms/bridge_claude.py index 6084b1f1..42b75052 100644 --- a/request_llms/bridge_claude.py +++ b/request_llms/bridge_claude.py @@ -7,7 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. predict_no_ui_long_connection:支持多线程 """ import os diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index b831dc59..20b53b44 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -5,7 +5,7 @@ from transformers import AutoModel, AutoTokenizer import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns @@ -52,14 +52,15 @@ class GetInternlmHandle(LocalLLMHandle): import torch from transformers import AutoModelForCausalLM, AutoTokenizer device = get_conf('LOCAL_MODEL_DEVICE') - if self._model is None: - tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) - if device=='cpu': - model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16) - else: - model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda() + with ProxyNetworkActivate('Download_LLM'): + if self._model is None: + tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) + if device=='cpu': + model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16) + else: + model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda() - model = model.eval() + model = model.eval() return model, tokenizer def llm_stream_generator(self, **kwargs): diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index 0b226df7..afd886bf 100644 --- a/request_llms/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -6,7 +6,7 @@ from transformers import AutoModel, AutoTokenizer import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns @@ -29,12 +29,13 @@ class GetONNXGLMHandle(LocalLLMHandle): import platform from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig - model_id = 'qwen/Qwen-7B-Chat' - self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True) - # use fp16 - model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() - model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 - self._model = model + with ProxyNetworkActivate('Download_LLM'): + model_id = 'qwen/Qwen-7B-Chat' + self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', 
trust_remote_code=True, resume_download=True) + # use fp16 + model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() + model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 + self._model = model return self._model, self._tokenizer diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index fe6be961..38fcfc91 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -201,7 +201,7 @@ class LocalLLMHandle(Process): if res.startswith(self.std_tag): new_output = res[len(self.std_tag):] std_out = std_out[:std_out_clip_len] - # print(new_output, end='') + print(new_output, end='') std_out = new_output + std_out yield self.std_tag + '\n```\n' + std_out + '\n```\n' elif res == '[Finish]': diff --git a/tests/test_llms.py b/tests/test_llms.py index 5c5d2f6c..6285f030 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -15,11 +15,11 @@ if __name__ == "__main__": # from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection # from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection # from request_llms.bridge_claude import predict_no_ui_long_connection - # from request_llms.bridge_internlm import predict_no_ui_long_connection + from request_llms.bridge_internlm import predict_no_ui_long_connection # from request_llms.bridge_qwen import predict_no_ui_long_connection # from request_llms.bridge_spark import predict_no_ui_long_connection # from request_llms.bridge_zhipu import predict_no_ui_long_connection - from request_llms.bridge_chatglm3 import predict_no_ui_long_connection + # from request_llms.bridge_chatglm3 import predict_no_ui_long_connection llm_kwargs = { 'max_length': 4096, diff --git a/version b/version index 5e4fb7d0..69a871e0 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.57, + "version": 3.58, "show_feature": true, - "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" + "new_feature": "修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" } From 28119e343ce75b393019806b3600d512dddb8262 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Sat, 11 Nov 2023 22:01:19 +0800 Subject: [PATCH 07/11] =?UTF-8?q?=E5=B0=86autogen=E5=A4=A7=E6=A8=A1?= =?UTF-8?q?=E5=9E=8B=E8=B0=83=E7=94=A8=E5=BA=95=E5=B1=82hook=E6=8E=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 2 +- crazy_functional.py | 21 +- crazy_functions/agent_fns/auto_agent.py | 2 - crazy_functions/agent_fns/bridge_autogen.py | 584 -------------------- crazy_functions/agent_fns/general.py | 46 +- crazy_functions/多智能体.py | 9 +- request_llms/bridge_all.py | 2 +- requirements.txt | 1 + 8 files changed, 33 insertions(+), 634 deletions(-) delete mode 100644 crazy_functions/agent_fns/bridge_autogen.py diff --git a/config.py b/config.py index f578aa85..dfcd9cf1 100644 --- a/config.py +++ b/config.py @@ -211,7 +211,7 @@ ALLOW_RESET_CONFIG = False # 在使用AutoGen插件时,是否使用Docker容器运行代码 -AUTOGEN_USE_DOCKER = True +AUTOGEN_USE_DOCKER = False # 临时的上传文件夹位置,请勿修改 diff --git a/crazy_functional.py b/crazy_functional.py index 155fc76c..a5c77ea0 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -539,18 +539,15 @@ def get_crazy_functions(): except: print('Load function plugin failed') - try: - from 
crazy_functions.多智能体 import 多智能体终端 - function_plugins.update({ - "多智能体终端(微软AutoGen)": { - "Group": "智能体", - "Color": "stop", - "AsButton": True, - "Function": HotReload(多智能体终端) - } - }) - except: - print('Load function plugin failed') + from crazy_functions.多智能体 import 多智能体终端 + function_plugins.update({ + "多智能体终端(微软AutoGen)": { + "Group": "智能体", + "Color": "stop", + "AsButton": True, + "Function": HotReload(多智能体终端) + } + }) # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 diff --git a/crazy_functions/agent_fns/auto_agent.py b/crazy_functions/agent_fns/auto_agent.py index f6a2832c..6edf0e25 100644 --- a/crazy_functions/agent_fns/auto_agent.py +++ b/crazy_functions/agent_fns/auto_agent.py @@ -2,8 +2,6 @@ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom from crazy_functions.agent_fns.general import AutoGenGeneral -import time -from autogen import AssistantAgent, UserProxyAgent diff --git a/crazy_functions/agent_fns/bridge_autogen.py b/crazy_functions/agent_fns/bridge_autogen.py deleted file mode 100644 index 5bf4aacd..00000000 --- a/crazy_functions/agent_fns/bridge_autogen.py +++ /dev/null @@ -1,584 +0,0 @@ -from time import sleep -import logging -import time -from typing import List, Optional, Dict, Callable, Union -import sys -import shutil -import numpy as np -from flaml import tune, BlendSearch -from flaml.tune.space import is_constant -from flaml.automl.logger import logger_formatter -from collections import defaultdict - -try: - import openai - from openai.error import ( - ServiceUnavailableError, - RateLimitError, - APIError, - InvalidRequestError, - APIConnectionError, - Timeout, - AuthenticationError, - ) - from openai import Completion as openai_Completion - import diskcache - - ERROR = None -except ImportError: - ERROR = ImportError("please install openai and diskcache to use the autogen.oai subpackage.") - openai_Completion = object -logger = logging.getLogger(__name__) -if not logger.handlers: - # Add the console handler. - _ch = logging.StreamHandler(stream=sys.stdout) - _ch.setFormatter(logger_formatter) - logger.addHandler(_ch) - - -class Completion(openai_Completion): - """A class for OpenAI completion API. - - It also supports: ChatCompletion, Azure OpenAI API. 
- """ - - # set of models that support chat completion - chat_models = { - "gpt-3.5-turbo", - "gpt-3.5-turbo-0301", # deprecate in Sep - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-16k-0613", - "gpt-35-turbo", - "gpt-35-turbo-16k", - "gpt-4", - "gpt-4-32k", - "gpt-4-32k-0314", # deprecate in Sep - "gpt-4-0314", # deprecate in Sep - "gpt-4-0613", - "gpt-4-32k-0613", - } - - # price per 1k tokens - price1K = { - "text-ada-001": 0.0004, - "text-babbage-001": 0.0005, - "text-curie-001": 0.002, - "code-cushman-001": 0.024, - "code-davinci-002": 0.1, - "text-davinci-002": 0.02, - "text-davinci-003": 0.02, - "gpt-3.5-turbo": (0.0015, 0.002), - "gpt-3.5-turbo-instruct": (0.0015, 0.002), - "gpt-3.5-turbo-0301": (0.0015, 0.002), # deprecate in Sep - "gpt-3.5-turbo-0613": (0.0015, 0.002), - "gpt-3.5-turbo-16k": (0.003, 0.004), - "gpt-3.5-turbo-16k-0613": (0.003, 0.004), - "gpt-35-turbo": (0.0015, 0.002), - "gpt-35-turbo-16k": (0.003, 0.004), - "gpt-35-turbo-instruct": (0.0015, 0.002), - "gpt-4": (0.03, 0.06), - "gpt-4-32k": (0.06, 0.12), - "gpt-4-0314": (0.03, 0.06), # deprecate in Sep - "gpt-4-32k-0314": (0.06, 0.12), # deprecate in Sep - "gpt-4-0613": (0.03, 0.06), - "gpt-4-32k-0613": (0.06, 0.12), - } - - default_search_space = { - "model": tune.choice( - [ - "text-ada-001", - "text-babbage-001", - "text-davinci-003", - "gpt-3.5-turbo", - "gpt-4", - ] - ), - "temperature_or_top_p": tune.choice( - [ - {"temperature": tune.uniform(0, 2)}, - {"top_p": tune.uniform(0, 1)}, - ] - ), - "max_tokens": tune.lograndint(50, 1000), - "n": tune.randint(1, 100), - "prompt": "{prompt}", - } - - seed = 41 - cache_path = f".cache/{seed}" - # retry after this many seconds - retry_wait_time = 10 - # fail a request after hitting RateLimitError for this many seconds - max_retry_period = 120 - # time out for request to openai server - request_timeout = 60 - - openai_completion_class = not ERROR and openai.Completion - _total_cost = 0 - optimization_budget = None - - _history_dict = _count_create = None - - @classmethod - def set_cache(cls, seed: Optional[int] = 41, cache_path_root: Optional[str] = ".cache"): - """Set cache path. - - Args: - seed (int, Optional): The integer identifier for the pseudo seed. - Results corresponding to different seeds will be cached in different places. - cache_path (str, Optional): The root path for the cache. - The complete cache path will be {cache_path}/{seed}. - """ - cls.seed = seed - cls.cache_path = f"{cache_path_root}/{seed}" - - @classmethod - def clear_cache(cls, seed: Optional[int] = None, cache_path_root: Optional[str] = ".cache"): - """Clear cache. - - Args: - seed (int, Optional): The integer identifier for the pseudo seed. - If omitted, all caches under cache_path_root will be cleared. - cache_path (str, Optional): The root path for the cache. - The complete cache path will be {cache_path}/{seed}. 
- """ - if seed is None: - shutil.rmtree(cache_path_root, ignore_errors=True) - return - with diskcache.Cache(f"{cache_path_root}/{seed}") as cache: - cache.clear() - - @classmethod - def _book_keeping(cls, config: Dict, response): - """Book keeping for the created completions.""" - if response != -1 and "cost" not in response: - response["cost"] = cls.cost(response) - if cls._history_dict is None: - return - if cls._history_compact: - value = { - "created_at": [], - "cost": [], - "token_count": [], - } - if "messages" in config: - messages = config["messages"] - if len(messages) > 1 and messages[-1]["role"] != "assistant": - existing_key = get_key(messages[:-1]) - value = cls._history_dict.pop(existing_key, value) - key = get_key(messages + [choice["message"] for choice in response["choices"]]) - else: - key = get_key([config["prompt"]] + [choice.get("text") for choice in response["choices"]]) - value["created_at"].append(cls._count_create) - value["cost"].append(response["cost"]) - value["token_count"].append( - { - "model": response["model"], - "prompt_tokens": response["usage"]["prompt_tokens"], - "completion_tokens": response["usage"].get("completion_tokens", 0), - "total_tokens": response["usage"]["total_tokens"], - } - ) - cls._history_dict[key] = value - cls._count_create += 1 - return - cls._history_dict[cls._count_create] = { - "request": config, - "response": response.to_dict_recursive(), - } - cls._count_create += 1 - - @classmethod - def _get_response(cls, config: Dict, raise_on_ratelimit_or_timeout=False, use_cache=True): - """Get the response from the openai api call. - - Try cache first. If not found, call the openai api. If the api call fails, retry after retry_wait_time. - """ - config = config.copy() - - - @classmethod - def _get_max_valid_n(cls, key, max_tokens): - # find the max value in max_valid_n_per_max_tokens - # whose key is equal or larger than max_tokens - return max( - (value for k, value in cls._max_valid_n_per_max_tokens.get(key, {}).items() if k >= max_tokens), - default=1, - ) - - @classmethod - def _get_min_invalid_n(cls, key, max_tokens): - # find the min value in min_invalid_n_per_max_tokens - # whose key is equal or smaller than max_tokens - return min( - (value for k, value in cls._min_invalid_n_per_max_tokens.get(key, {}).items() if k <= max_tokens), - default=None, - ) - - @classmethod - def _get_region_key(cls, config): - # get a key for the valid/invalid region corresponding to the given config - config = cls._pop_subspace(config, always_copy=False) - return ( - config["model"], - config.get("prompt", config.get("messages")), - config.get("stop"), - ) - - @classmethod - def _update_invalid_n(cls, prune, region_key, max_tokens, num_completions): - if prune: - # update invalid n and prune this config - cls._min_invalid_n_per_max_tokens[region_key] = invalid_n = cls._min_invalid_n_per_max_tokens.get( - region_key, {} - ) - invalid_n[max_tokens] = min(num_completions, invalid_n.get(max_tokens, np.inf)) - - @classmethod - def _pop_subspace(cls, config, always_copy=True): - if "subspace" in config: - config = config.copy() - config.update(config.pop("subspace")) - return config.copy() if always_copy else config - - @classmethod - def _get_params_for_create(cls, config: Dict) -> Dict: - """Get the params for the openai api call from a config in the search space.""" - params = cls._pop_subspace(config) - if cls._prompts: - params["prompt"] = cls._prompts[config["prompt"]] - else: - params["messages"] = cls._messages[config["messages"]] - if "stop" in 
params: - params["stop"] = cls._stops and cls._stops[params["stop"]] - temperature_or_top_p = params.pop("temperature_or_top_p", None) - if temperature_or_top_p: - params.update(temperature_or_top_p) - if cls._config_list and "config_list" not in params: - params["config_list"] = cls._config_list - return params - - @classmethod - def create( - cls, - context: Optional[Dict] = None, - use_cache: Optional[bool] = True, - config_list: Optional[List[Dict]] = None, - filter_func: Optional[Callable[[Dict, Dict, Dict], bool]] = None, - raise_on_ratelimit_or_timeout: Optional[bool] = True, - allow_format_str_template: Optional[bool] = False, - **config, - ): - """Make a completion for a given context. - - Args: - context (Dict, Optional): The context to instantiate the prompt. - It needs to contain keys that are used by the prompt template or the filter function. - E.g., `prompt="Complete the following sentence: {prefix}, context={"prefix": "Today I feel"}`. - The actual prompt will be: - "Complete the following sentence: Today I feel". - More examples can be found at [templating](https://microsoft.github.io/autogen/docs/Use-Cases/enhanced_inference#templating). - use_cache (bool, Optional): Whether to use cached responses. - config_list (List, Optional): List of configurations for the completion to try. - The first one that does not raise an error will be used. - Only the differences from the default config need to be provided. - E.g., - - ```python - response = oai.Completion.create( - config_list=[ - { - "model": "gpt-4", - "api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "api_type": "azure", - "api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "api_version": "2023-03-15-preview", - }, - { - "model": "gpt-3.5-turbo", - "api_key": os.environ.get("OPENAI_API_KEY"), - "api_type": "open_ai", - "api_base": "https://api.openai.com/v1", - }, - { - "model": "llama-7B", - "api_base": "http://127.0.0.1:8080", - "api_type": "open_ai", - } - ], - prompt="Hi", - ) - ``` - - filter_func (Callable, Optional): A function that takes in the context, the config and the response and returns a boolean to indicate whether the response is valid. E.g., - - ```python - def yes_or_no_filter(context, config, response): - return context.get("yes_or_no_choice", False) is False or any( - text in ["Yes.", "No."] for text in oai.Completion.extract_text(response) - ) - ``` - - raise_on_ratelimit_or_timeout (bool, Optional): Whether to raise RateLimitError or Timeout when all configs fail. - When set to False, -1 will be returned when all configs fail. - allow_format_str_template (bool, Optional): Whether to allow format string template in the config. - **config: Configuration for the openai API call. This is used as parameters for calling openai API. - The "prompt" or "messages" parameter can contain a template (str or Callable) which will be instantiated with the context. - Besides the parameters for the openai API call, it can also contain: - - `max_retry_period` (int): the total time (in seconds) allowed for retrying failed requests. - - `retry_wait_time` (int): the time interval to wait (in seconds) before retrying a failed request. - - `seed` (int) for the cache. This is useful when implementing "controlled randomness" for the completion. - - Returns: - Responses from OpenAI API, with additional fields. - - `cost`: the total cost. - When `config_list` is provided, the response will contain a few more fields: - - `config_id`: the index of the config in the config_list that is used to generate the response. 
-            - `pass_filter`: whether the response passes the filter function. None if no filter is provided.
-        """
-        if ERROR:
-            raise ERROR
-        config_list = [
-            {
-                "model": "llama-7B",
-                "api_base": "http://127.0.0.1:8080",
-                "api_type": "open_ai",
-            }
-        ]
-        last = len(config_list) - 1
-        cost = 0
-        for i, each_config in enumerate(config_list):
-            base_config = config.copy()
-            base_config["allow_format_str_template"] = allow_format_str_template
-            base_config.update(each_config)
-            if i < last and filter_func is None and "max_retry_period" not in base_config:
-                # max_retry_period = 0 to avoid retrying when no filter is given
-                base_config["max_retry_period"] = 0
-            try:
-                response = cls.create(
-                    context,
-                    use_cache,
-                    raise_on_ratelimit_or_timeout=i < last or raise_on_ratelimit_or_timeout,
-                    **base_config,
-                )
-                if response == -1:
-                    return response
-                pass_filter = filter_func is None or filter_func(
-                    context=context, base_config=config, response=response
-                )
-                if pass_filter or i == last:
-                    response["cost"] = cost + response["cost"]
-                    response["config_id"] = i
-                    response["pass_filter"] = pass_filter
-                    return response
-                cost += response["cost"]
-            except (AuthenticationError, RateLimitError, Timeout, InvalidRequestError):
-                logger.debug(f"failed with config {i}", exc_info=1)
-                if i == last:
-                    raise
-
-        params = cls._construct_params(context, config, allow_format_str_template=allow_format_str_template)
-        if not use_cache:
-            return cls._get_response(
-                params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout, use_cache=False
-            )
-        seed = cls.seed
-        if "seed" in params:
-            cls.set_cache(params.pop("seed"))
-        with diskcache.Cache(cls.cache_path) as cls._cache:
-            cls.set_cache(seed)
-            return cls._get_response(params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout)
-
-    @classmethod
-    def instantiate(
-        cls,
-        template: Union[str, None],
-        context: Optional[Dict] = None,
-        allow_format_str_template: Optional[bool] = False,
-    ):
-        if not context or template is None:
-            return template
-        if isinstance(template, str):
-            return template.format(**context) if allow_format_str_template else template
-        return template(context)
-
-    @classmethod
-    def _construct_params(cls, context, config, prompt=None, messages=None, allow_format_str_template=False):
-        params = config.copy()
-        model = config["model"]
-        prompt = config.get("prompt") if prompt is None else prompt
-        messages = config.get("messages") if messages is None else messages
-        # either "prompt" should be in config (for being compatible with non-chat models)
-        # or "messages" should be in config (for tuning chat models only)
-        if prompt is None and (model in cls.chat_models or issubclass(cls, ChatCompletion)):
-            if messages is None:
-                raise ValueError("Either prompt or messages should be in config for chat models.")
-        if prompt is None:
-            params["messages"] = (
-                [
-                    {
-                        **m,
-                        "content": cls.instantiate(m["content"], context, allow_format_str_template),
-                    }
-                    if m.get("content")
-                    else m
-                    for m in messages
-                ]
-                if context
-                else messages
-            )
-        elif model in cls.chat_models or issubclass(cls, ChatCompletion):
-            # convert prompt to messages
-            params["messages"] = [
-                {
-                    "role": "user",
-                    "content": cls.instantiate(prompt, context, allow_format_str_template),
-                },
-            ]
-            params.pop("prompt", None)
-        else:
-            params["prompt"] = cls.instantiate(prompt, context, allow_format_str_template)
-        return params
-
-    @classmethod
-    def extract_text(cls, response: dict) -> List[str]:
-        """Extract the text from a completion or chat response.
-
-        Args:
-            response (dict): The response from OpenAI API.
-
-        Returns:
-            A list of text in the responses.
-        """
-        choices = response["choices"]
-        if "text" in choices[0]:
-            return [choice["text"] for choice in choices]
-        return [choice["message"].get("content", "") for choice in choices]
-
-    @classmethod
-    def extract_text_or_function_call(cls, response: dict) -> List[str]:
-        """Extract the text or function calls from a completion or chat response.
-
-        Args:
-            response (dict): The response from OpenAI API.
-
-        Returns:
-            A list of text or function calls in the responses.
-        """
-        choices = response["choices"]
-        if "text" in choices[0]:
-            return [choice["text"] for choice in choices]
-        return [
-            choice["message"] if "function_call" in choice["message"] else choice["message"].get("content", "")
-            for choice in choices
-        ]
-
-    @classmethod
-    @property
-    def logged_history(cls) -> Dict:
-        """Return the book keeping dictionary."""
-        return cls._history_dict
-
-    @classmethod
-    def print_usage_summary(cls) -> Dict:
-        """Return the usage summary."""
-        if cls._history_dict is None:
-            print("No usage summary available.", flush=True)
-
-        token_count_summary = defaultdict(lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0})
-
-        if not cls._history_compact:
-            source = cls._history_dict.values()
-            total_cost = sum(msg_pair["response"]["cost"] for msg_pair in source)
-        else:
-            # source = cls._history_dict["token_count"]
-            # total_cost = sum(cls._history_dict['cost'])
-            total_cost = sum(sum(value_list["cost"]) for value_list in cls._history_dict.values())
-            source = (
-                token_data for value_list in cls._history_dict.values() for token_data in value_list["token_count"]
-            )
-
-        for entry in source:
-            if not cls._history_compact:
-                model = entry["response"]["model"]
-                token_data = entry["response"]["usage"]
-            else:
-                model = entry["model"]
-                token_data = entry
-
-            token_count_summary[model]["prompt_tokens"] += token_data["prompt_tokens"]
-            token_count_summary[model]["completion_tokens"] += token_data["completion_tokens"]
-            token_count_summary[model]["total_tokens"] += token_data["total_tokens"]
-
-        print(f"Total cost: {total_cost}", flush=True)
-        for model, counts in token_count_summary.items():
-            print(
-                f"Token count summary for model {model}: prompt_tokens: {counts['prompt_tokens']}, completion_tokens: {counts['completion_tokens']}, total_tokens: {counts['total_tokens']}",
-                flush=True,
-            )
-
-    @classmethod
-    def start_logging(
-        cls, history_dict: Optional[Dict] = None, compact: Optional[bool] = True, reset_counter: Optional[bool] = True
-    ):
-        """Start book keeping.
-
-        Args:
-            history_dict (Dict): A dictionary for book keeping.
-                If no provided, a new one will be created.
-            compact (bool): Whether to keep the history dictionary compact.
-                Compact history contains one key per conversation, and the value is a dictionary
-                like:
-        ```python
-        {
-            "create_at": [0, 1],
-            "cost": [0.1, 0.2],
-        }
-        ```
-                where "created_at" is the index of API calls indicating the order of all the calls,
-                and "cost" is the cost of each call. This example shows that the conversation is based
-                on two API calls. The compact format is useful for condensing the history of a conversation.
-                If compact is False, the history dictionary will contain all the API calls: the key
-                is the index of the API call, and the value is a dictionary like:
-        ```python
-        {
-            "request": request_dict,
-            "response": response_dict,
-        }
-        ```
-                where request_dict is the request sent to OpenAI API, and response_dict is the response.
-                For a conversation containing two API calls, the non-compact history dictionary will be like:
-        ```python
-        {
-            0: {
-                "request": request_dict_0,
-                "response": response_dict_0,
-            },
-            1: {
-                "request": request_dict_1,
-                "response": response_dict_1,
-            },
-        ```
-                The first request's messages plus the response is equal to the second request's messages.
-                For a conversation with many turns, the non-compact history dictionary has a quadratic size
-                while the compact history dict has a linear size.
-            reset_counter (bool): whether to reset the counter of the number of API calls.
-        """
-        cls._history_dict = {} if history_dict is None else history_dict
-        cls._history_compact = compact
-        cls._count_create = 0 if reset_counter or cls._count_create is None else cls._count_create
-
-    @classmethod
-    def stop_logging(cls):
-        """End book keeping."""
-        cls._history_dict = cls._count_create = None
-
-
-class ChatCompletion(Completion):
-    """A class for OpenAI API ChatCompletion. Share the same API as Completion."""
-
-    default_search_space = Completion.default_search_space.copy()
-    default_search_space["model"] = tune.choice(["gpt-3.5-turbo", "gpt-4"])
-    openai_completion_class = not ERROR and openai.ChatCompletion
diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py
index f0b9ce87..8b2884de 100644
--- a/crazy_functions/agent_fns/general.py
+++ b/crazy_functions/agent_fns/general.py
@@ -9,17 +9,27 @@ def gpt_academic_generate_oai_reply(
     sender,
     config,
 ):
-    from .bridge_autogen import Completion
     llm_config = self.llm_config if config is None else config
     if llm_config is False:
         return False, None
     if messages is None:
         messages = self._oai_messages[sender]
-    response = Completion.create(
-        context=messages[-1].pop("context", None), messages=self._oai_system_message + messages, **llm_config
+    inputs = messages[-1]['content']
+    history = []
+    for message in messages[:-1]:
+        history.append(message['content'])
+    context=messages[-1].pop("context", None)
+    assert context is None, "预留参数 context 未实现"
+
+    reply = predict_no_ui_long_connection(
+        inputs=inputs,
+        llm_kwargs=llm_config,
+        history=history,
+        sys_prompt=self._oai_system_message[0]['content'],
+        console_slience=True
     )
-    return True, Completion.extract_text_or_function_call(response)[0]
+    return True, reply
 
 class AutoGenGeneral(PluginMultiprocessManager):
     def gpt_academic_print_override(self, user_proxy, message, sender):
@@ -45,32 +55,6 @@ class AutoGenGeneral(PluginMultiprocessManager):
         else:
             raise TimeoutError("等待用户输入超时")
 
-    # def gpt_academic_generate_oai_reply(self, agent, messages, sender, config):
-    #     from .bridge_autogen import Completion
-    #     if messages is None:
-    #         messages = agent._oai_messages[sender]
-
-    #     def instantiate(
-    #         cls,
-    #         template: Union[str, None],
-    #         context: Optional[Dict] = None,
-    #         allow_format_str_template: Optional[bool] = False,
-    #     ):
-    #         if not context or template is None:
-    #             return template
-    #         if isinstance(template, str):
-    #             return template.format(**context) if allow_format_str_template else template
-    #         return template(context)
-
-    #     res = predict_no_ui_long_connection(
-    #         messages[-1].pop("context", None),
-    #         llm_kwargs=self.llm_kwargs,
-    #         history=messages,
-    #         sys_prompt=agent._oai_system_message,
-    #         observe_window=None,
-    #         console_slience=False)
-    #     return True, res
-
     def define_agents(self):
         raise NotImplementedError
@@ -85,7 +69,7 @@ class AutoGenGeneral(PluginMultiprocessManager):
         for agent_kwargs in agents:
             agent_cls = agent_kwargs.pop('cls')
             kwargs = {
-                'llm_config':{},
+                'llm_config':self.llm_kwargs,
                 'code_execution_config':code_execution_config
             }
             kwargs.update(agent_kwargs)
diff --git a/crazy_functions/多智能体.py b/crazy_functions/多智能体.py
index 8a530f17..22429f3d 100644
--- a/crazy_functions/多智能体.py
+++ b/crazy_functions/多智能体.py
@@ -41,11 +41,11 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
         "azure-gpt-4",
         "azure-gpt-4-32k",
     ]
-    llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
     if llm_kwargs['llm_model'] not in supported_llms:
         chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
+    llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
 
     # 检查当前的模型是否符合要求
     API_URL_REDIRECT = get_conf('API_URL_REDIRECT')
@@ -56,7 +56,9 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
 
     # 尝试导入依赖,如果缺少依赖,则给出安装建议
     try:
-        import autogen, docker
+        import autogen
+        if get_conf("AUTOGEN_USE_DOCKER"):
+            import docker
     except:
         chatbot.append([ f"处理任务: {txt}", f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pyautogen docker```。"])
@@ -67,7 +69,8 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     try:
         import autogen
         import glob, os, time, subprocess
-        subprocess.Popen(['docker', '--version'])
+        if get_conf("AUTOGEN_USE_DOCKER"):
+            subprocess.Popen(["docker", "--version"])
     except:
         chatbot.append([f"处理任务: {txt}", f"缺少docker运行环境!"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 27b91c26..4c41f374 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -548,7 +548,7 @@ def LLM_CATCH_EXCEPTION(f):
     return decorated
 
 
-def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False):
     """
     发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
     inputs:
diff --git a/requirements.txt b/requirements.txt
index e832a28c..1f86d336 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,6 +15,7 @@ Markdown
 pygments
 pymupdf
 openai
+pyautogen
 numpy
 arxiv
 rich

From f7f6db831beb891a9b5daa291db9136c1ef11dc2 Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Sat, 11 Nov 2023 22:35:06 +0800
Subject: [PATCH 08/11] =?UTF-8?q?=E5=A4=84=E7=90=86=E6=A8=A1=E5=9E=8B?=
 =?UTF-8?q?=E5=85=BC=E5=AE=B9=E7=9A=84=E4=B8=80=E4=BA=9B=E7=BB=86=E8=8A=82?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 crazy_functions/agent_fns/general.py | 1 +
 crazy_functions/agent_fns/pipe.py | 7 ++++++-
 crazy_functions/多智能体.py | 8 +++++---
 request_llms/bridge_all.py | 7 +++++++
 4 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py
index 8b2884de..49bc4dc8 100644
--- a/crazy_functions/agent_fns/general.py
+++ b/crazy_functions/agent_fns/general.py
@@ -29,6 +29,7 @@ def gpt_academic_generate_oai_reply(
         sys_prompt=self._oai_system_message[0]['content'],
         console_slience=True
     )
+    assumed_done = reply.endswith('\nTERMINATE')
     return True, reply
 
 class AutoGenGeneral(PluginMultiprocessManager):
diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py
index 680e91c9..6ce9961a 100644
--- a/crazy_functions/agent_fns/pipe.py
+++ b/crazy_functions/agent_fns/pipe.py
@@ -21,7 +21,7 @@ class PluginMultiprocessManager:
         # self.web_port = web_port
         self.alive = True
         self.use_docker = get_conf("AUTOGEN_USE_DOCKER")
-
+        self.last_user_input = ""
         # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time
         timeout_seconds = 5 * 60
         self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5)
@@ -55,6 +55,11 @@ class PluginMultiprocessManager:
 
     def send_command(self, cmd):
         # ⭐ run in main process
+        if cmd == self.last_user_input:
+            print('repeated input detected, ignore')
+            cmd = ""
+        else:
+            self.last_user_input = cmd
         self.parent_conn.send(PipeCom("user_input", cmd))
 
     def immediate_showoff_when_possible(self, fp):
diff --git a/crazy_functions/多智能体.py b/crazy_functions/多智能体.py
index 590c638f..8b9a69cd 100644
--- a/crazy_functions/多智能体.py
+++ b/crazy_functions/多智能体.py
@@ -48,11 +48,13 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
         "azure-gpt-4",
         "azure-gpt-4-32k",
     ]
-    if llm_kwargs['llm_model'] not in supported_llms:
-        chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."])
+    from request_llms.bridge_all import model_info
+    if model_info[llm_kwargs['llm_model']]["max_token"] < 8000: # 至少是8k上下文的模型
+        chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}的最大上下文长度太短, 不能支撑AutoGen运行。"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
-    llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
+    if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # 如果不是本地模型,加载API_KEY
+        llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
 
     # 检查当前的模型是否符合要求
     API_URL_REDIRECT = get_conf('API_URL_REDIRECT')
diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 646e7a64..7d13bbdd 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -242,6 +242,13 @@ for model in AVAIL_LLM_MODELS:
         mi.update({"endpoint": api2d_endpoint})
         model_info.update({model: mi})
 
+# -=-=-=-=-=-=- azure 对齐支持 -=-=-=-=-=-=-
+for model in AVAIL_LLM_MODELS:
+    if model.startswith('azure-') and (model.replace('azure-','') in model_info.keys()):
+        mi = model_info[model.replace('azure-','')]
+        mi.update({"endpoint": azure_endpoint})
+        model_info.update({model: mi})
+
 # -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=-
 if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS:
     from .bridge_claude import predict_no_ui_long_connection as claude_noui

From a55bc0c07cef0498e3e65c6b0605b6141c018f52 Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Sat, 11 Nov 2023 23:22:09 +0800
Subject: [PATCH 09/11] =?UTF-8?q?AutoGen=E8=87=AA=E5=8A=A8=E5=BF=BD?=
 =?UTF-8?q?=E7=95=A5=E9=87=8D=E5=A4=8D=E7=9A=84=E8=BE=93=E5=85=A5?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 crazy_functions/agent_fns/pipe.py | 10 +++++++---
 crazy_functions/多智能体.py | 5 -----
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py
index 6ce9961a..bb3bc785 100644
--- a/crazy_functions/agent_fns/pipe.py
+++ b/crazy_functions/agent_fns/pipe.py
@@ -55,12 +55,14 @@ class PluginMultiprocessManager:
 
     def send_command(self, cmd):
         # ⭐ run in main process
+        repeated = False
         if cmd == self.last_user_input:
-            print('repeated input detected, ignore')
+            repeated = True
             cmd = ""
         else:
             self.last_user_input = cmd
         self.parent_conn.send(PipeCom("user_input", cmd))
+        return repeated, cmd
 
     def immediate_showoff_when_possible(self, fp):
         # ⭐ 主进程
@@ -111,7 +113,7 @@ class PluginMultiprocessManager:
         if create_or_resume == 'create':
             self.cnt = 1
             self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐
-        self.send_command(txt)
+        repeated, cmd_to_autogen = self.send_command(txt)
         if txt == 'exit':
             self.chatbot.append([f"结束", "结束信号已明确,终止AutoGen程序。"])
             yield from update_ui(chatbot=self.chatbot, history=self.history)
@@ -143,7 +145,9 @@ class PluginMultiprocessManager:
                     break
                 if msg.cmd == "show":
                     yield from self.overwatch_workdir_file_change()
-                    self.chatbot.append([f"运行阶段-{self.cnt}", msg.content])
+                    notice = ""
+                    if repeated: notice = "(自动忽略重复的输入)"
+                    self.chatbot.append([f"运行阶段-{self.cnt}(上次用户反馈输入为: 「{cmd_to_autogen}」{notice}", msg.content])
                     self.cnt += 1
                     yield from update_ui(chatbot=self.chatbot, history=self.history)
                 if msg.cmd == "interact":
diff --git a/crazy_functions/多智能体.py b/crazy_functions/多智能体.py
index 8b9a69cd..d2adee00 100644
--- a/crazy_functions/多智能体.py
+++ b/crazy_functions/多智能体.py
@@ -3,11 +3,6 @@
 测试:
     - show me the solution of $x^2=cos(x)$, solve this problem with figure, and plot and save image to t.jpg
 
-Testing:
-    - Crop the image, keeping the bottom half.
-    - Swap the blue channel and red channel of the image.
-    - Convert the image to grayscale.
-    - Convert the CSV file to an Excel spreadsheet.
 """

From 2d91e438d658220c6b366aecf3aaa81e09eb75c4 Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Sat, 11 Nov 2023 23:22:50 +0800
Subject: [PATCH 10/11] =?UTF-8?q?=E4=BF=AE=E6=AD=A3internlm=E8=BE=93?=
 =?UTF-8?q?=E5=85=A5=E8=AE=BE=E5=A4=87bug?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 request_llms/bridge_internlm.py | 3 ++-
 request_llms/local_llm_class.py | 33 +++++++++++++++------------------
 2 files changed, 17 insertions(+), 19 deletions(-)

diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py
index 20b53b44..b2be36a4 100644
--- a/request_llms/bridge_internlm.py
+++ b/request_llms/bridge_internlm.py
@@ -94,8 +94,9 @@ class GetInternlmHandle(LocalLLMHandle):
         inputs = tokenizer([prompt], padding=True, return_tensors="pt")
         input_length = len(inputs["input_ids"][0])
+        device = get_conf('LOCAL_MODEL_DEVICE')
         for k, v in inputs.items():
-            inputs[k] = v.cuda()
+            inputs[k] = v.to(device)
         input_ids = inputs["input_ids"]
         batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
         if generation_config is None:
diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py
index 38fcfc91..413df03f 100644
--- a/request_llms/local_llm_class.py
+++ b/request_llms/local_llm_class.py
@@ -1,6 +1,6 @@
 import time
 import threading
-from toolbox import update_ui
+from toolbox import update_ui, Singleton
 from multiprocessing import Process, Pipe
 from contextlib import redirect_stdout
 from request_llms.queued_pipe import create_queue_pipe
@@ -26,23 +26,20 @@ class ThreadLock(object):
     def __exit__(self, type, value, traceback):
         self.release()
 
-def SingletonLocalLLM(cls):
-    """
-    Singleton Decroator for LocalLLMHandle
-    """
-    _instance = {}
+@Singleton
+class GetSingletonHandle():
+    def __init__(self):
+        self.llm_model_already_running = {}
 
-    def _singleton(*args, **kargs):
-        if cls not in _instance:
-            _instance[cls] = cls(*args, **kargs)
-            return _instance[cls]
-        elif _instance[cls].corrupted:
-            _instance[cls] = cls(*args, **kargs)
-            return _instance[cls]
+    def get_llm_model_instance(self, cls, *args, **kargs):
+        if cls not in self.llm_model_already_running:
+            self.llm_model_already_running[cls] = cls(*args, **kargs)
+            return self.llm_model_already_running[cls]
+        elif self.llm_model_already_running[cls].corrupted:
+            self.llm_model_already_running[cls] = cls(*args, **kargs)
+            return self.llm_model_already_running[cls]
         else:
-            return _instance[cls]
-    return _singleton
-
+            return self.llm_model_already_running[cls]
 
 def reset_tqdm_output():
     import sys, tqdm
@@ -221,7 +218,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
         """
             refer to request_llms/bridge_all.py
         """
-        _llm_handle = SingletonLocalLLM(LLMSingletonClass)()
+        _llm_handle = GetSingletonHandle().get_llm_model_instance(LLMSingletonClass)
         if len(observe_window) >= 1:
             observe_window[0] = load_message + "\n\n" + _llm_handle.get_state()
         if not _llm_handle.running:
@@ -269,7 +266,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
         """
         chatbot.append((inputs, ""))
 
-        _llm_handle = SingletonLocalLLM(LLMSingletonClass)()
+        _llm_handle = GetSingletonHandle().get_llm_model_instance(LLMSingletonClass)
         chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state())
         yield from update_ui(chatbot=chatbot, history=[])
         if not _llm_handle.running:

From eeb70e966ce7953b6caba910da6838bd044ae1d9 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Sat, 11 Nov 2023 23:35:11 +0800
Subject: [PATCH 11/11] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=8F=92=E4=BB=B6?=
 =?UTF-8?q?=E6=8C=89=E9=92=AE=E9=A1=BA=E5=BA=8F?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 crazy_functional.py | 18 +++++++++---------
 version | 4 ++--
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/crazy_functional.py b/crazy_functional.py
index c86aac15..1e7ca584 100644
--- a/crazy_functional.py
+++ b/crazy_functional.py
@@ -74,7 +74,7 @@ def get_crazy_functions():
         "批量总结Word文档": {
             "Group": "学术",
             "Color": "stop",
-            "AsButton": True,
+            "AsButton": False,
             "Info": "批量总结word文档 | 输入参数为路径",
             "Function": HotReload(总结word文档)
         },
@@ -178,6 +178,13 @@ def get_crazy_functions():
             "Info": "批量生成函数的注释 | 输入参数为路径",
             "Function": HotReload(批量生成函数注释)
         },
+        "精准翻译PDF论文": {
+            "Group": "学术",
+            "Color": "stop",
+            "AsButton": True,
+            "Info": "精准翻译PDF论文为中文 | 输入参数为路径",
+            "Function": HotReload(批量翻译PDF文档)
+        },
         "保存当前的对话": {
             "Group": "对话",
             "AsButton": True,
@@ -196,13 +203,6 @@ def get_crazy_functions():
             "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
             "Function": HotReload(高阶功能模板函数)
         },
-        "精准翻译PDF论文": {
-            "Group": "学术",
-            "Color": "stop",
-            "AsButton": True,
-            "Info": "精准翻译PDF论文为中文 | 输入参数为路径",
-            "Function": HotReload(批量翻译PDF文档)
-        },
         "询问多个GPT模型": {
             "Group": "对话",
             "Color": "stop",
@@ -563,7 +563,7 @@ def get_crazy_functions():
         from crazy_functions.多智能体 import 多智能体终端
 
         function_plugins.update({
-            "多智能体终端(微软AutoGen)": {
+            "AutoGen多智能体终端": {
                 "Group": "智能体",
                 "Color": "stop",
                 "AsButton": True,
diff --git a/version b/version
index 69a871e0..9b33c4f4 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
 {
-  "version": 3.58,
+  "version": 3.59,
   "show_feature": true,
-  "new_feature": "修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
+  "new_feature": "AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
 }