Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
Attempt to dispatch the plugins using natural language
@@ -90,7 +90,7 @@ class GptJsonIO():
             try:
                 logging.info(f'Repairing json:{response}')
                 repair_prompt = self.generate_repair_prompt(broken_json = response, error=repr(e))
-                result = self.generate_output(gpt_gen_fn(repair_prompt, self.generate_format_instructions()))
+                result = self.generate_output(gpt_gen_fn(repair_prompt, self.format_instructions))
                 logging.info('Repair json success.')
             except Exception as e:
                 # nothing more we can do, give up
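The changed line sits in GptJsonIO's auto-repair path: when a model reply fails to parse, a repair prompt is built from the broken output and the cached format instructions are sent again, so the per-call generate_format_instructions() becomes a read of the format_instructions attribute that later code also appends to. A minimal, self-contained sketch of the same pattern (a toy class with illustrative names, not the repository's actual implementation):

    import json, logging

    class JsonAutoRepairSketch:
        """Toy stand-in for GptJsonIO's repair flow; illustrative only."""
        def __init__(self, format_instructions: str):
            # built once and cached -- the commit switches to reading this attribute
            self.format_instructions = format_instructions

        def generate_output(self, response: str) -> dict:
            return json.loads(response)

        def generate_repair_prompt(self, broken_json: str, error: str) -> str:
            return f"The JSON below is invalid ({error}). Return a corrected version:\n{broken_json}"

        def generate_output_auto_repair(self, response: str, gpt_gen_fn):
            try:
                return self.generate_output(response)
            except Exception as e:
                logging.info(f'Repairing json:{response}')
                repair_prompt = self.generate_repair_prompt(broken_json=response, error=repr(e))
                # single retry: ask the model to fix its own output, re-sending the format spec
                return self.generate_output(gpt_gen_fn(repair_prompt, self.format_instructions))

    # usage with a stub "model" that returns valid JSON on the repair round
    io = JsonAutoRepairSketch('Reply with a JSON object {"plugin_selection": ..., "plugin_arg": ...}')
    broken = '{"plugin_selection": "F0001",}'   # trailing comma -> parse error
    print(io.generate_output_auto_repair(broken, lambda prompt, fmt: '{"plugin_selection": "F0001", "plugin_arg": ""}'))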
@@ -0,0 +1,48 @@
+from pydantic import BaseModel, Field
+from typing import List
+from toolbox import update_ui_lastest_msg, get_conf
+from request_llm.bridge_all import predict_no_ui_long_connection
+from crazy_functions.json_fns.pydantic_io import GptJsonIO
+import copy, json, pickle, os, sys
+
+def read_avail_plugin_enum():
+    from crazy_functional import get_crazy_functions
+    plugin_arr = get_crazy_functions()
+    # remove plugins without an explanation
+    plugin_arr = {k:v for k, v in plugin_arr.items() if 'Info' in v}
+    plugin_arr_info = {"F{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)}
+    plugin_arr_dict = {"F{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
+    prompt = json.dumps(plugin_arr_info, ensure_ascii=False, indent=2)
+    prompt = "\n\nThe definition of PluginEnum:\nPluginEnum=" + prompt
+    return prompt, plugin_arr_dict
+
+def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
+    plugin_arr_enum_prompt, plugin_arr_dict = read_avail_plugin_enum()
+    class Plugin(BaseModel):
+        plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F0000000000000")
+        plugin_arg: str = Field(description="The argument of the plugin. A path or url or empty.", default="")
+
+    # ⭐ ⭐ ⭐ select the plugin
+    yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n查找可用插件中...", chatbot=chatbot, history=history, delay=0)
+    gpt_json_io = GptJsonIO(Plugin)
+    gpt_json_io.format_instructions += plugin_arr_enum_prompt
+    inputs = "Choose the correct plugin and extract plugin_arg, the user requirement is: \n\n" + \
+             ">>" + txt + '\n\n' + \
+             gpt_json_io.format_instructions
+    run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(
+        inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
+    plugin_sel = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn)
+
+    if plugin_sel.plugin_selection in plugin_arr_dict:
+        # ⭐ ⭐ ⭐ execute the plugin
+        plugin = plugin_arr_dict[plugin_sel.plugin_selection]
+        fn = plugin['Function']
+        fn_name = fn.__name__
+        msg = f'正在调用插件: {fn_name}\n\n插件说明:{plugin["Info"]}'
+        yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
+        yield from fn(plugin_sel.plugin_arg, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, -1)
+        return
+    else:
+        msg = f'找不到合适插件执行该任务'
+        yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
+        return
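The new module turns the plugin registry into a numbered "PluginEnum" catalogue, asks the model to reply as JSON with a plugin id and an argument, and then invokes the selected plugin. A self-contained sketch of that flow, with a fake two-entry registry and a stub standing in for predict_no_ui_long_connection (all names below are illustrative):

    import json

    # stand-in for get_crazy_functions(): display name -> {"Info": ..., "Function": ...}
    plugin_registry = {
        "Summarize PDF":  {"Info": "Summarize a PDF given its path",  "Function": lambda arg: print("summarizing", arg)},
        "Translate repo": {"Info": "Translate a project given a url", "Function": lambda arg: print("translating", arg)},
    }

    # enumerate plugins as F0001, F0002, ... exactly like read_avail_plugin_enum()
    plugin_info = {"F{:04d}".format(i): v["Info"] for i, v in enumerate(plugin_registry.values(), start=1)}
    plugin_dict = {"F{:04d}".format(i): v for i, v in enumerate(plugin_registry.values(), start=1)}
    enum_prompt = "\n\nThe definition of PluginEnum:\nPluginEnum=" + json.dumps(plugin_info, ensure_ascii=False, indent=2)

    def stub_llm(inputs: str, sys_prompt: str) -> str:
        # the real code routes this through predict_no_ui_long_connection; here we fake a JSON reply
        return '{"plugin_selection": "F0001", "plugin_arg": "report.pdf"}'

    inputs = "Choose the correct plugin and extract plugin_arg, the user requirement is: \n\n>>Summarize report.pdf" + enum_prompt
    selection = json.loads(stub_llm(inputs, ""))

    # dispatch, falling back to a message when the id is unknown
    if selection["plugin_selection"] in plugin_dict:
        plugin_dict[selection["plugin_selection"]]["Function"](selection["plugin_arg"])
    else:
        print("No suitable plugin found for this task")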
@@ -8,24 +8,17 @@ from crazy_functions.crazy_utils import input_clipping
 from crazy_functions.json_fns.pydantic_io import GptJsonIO
 from crazy_functions.vt_fns.vt_modify_config import modify_configuration_hot
 from crazy_functions.vt_fns.vt_modify_config import modify_configuration_reboot
+from crazy_functions.vt_fns.vt_call_plugin import execute_plugin
-from enum import Enum
 import copy, json, pickle, os, sys
 
-class IntentionEnum(str, Enum):
-    ModifyConfiguration = 'ModifyConfiguration'
-    ExecutePlugin = 'ExecutePlugin'
-    Chat = 'Chat'
 
 class UserIntention(BaseModel):
     user_prompt: str = Field(description="the content of user input", default="")
-    intention_type: IntentionEnum = Field(description="the type of user intention", default=IntentionEnum.Chat)
+    intention_type: str = Field(description="the type of user intention, choose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']", default="Chat")
     user_provide_file: bool = Field(description="whether the user provides a path to a file", default=False)
     user_provide_url: bool = Field(description="whether the user provides a url", default=False)
 
-def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
-    # not finished yet
-    pass
 
 def chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
@@ -37,6 +30,21 @@ def chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_i
     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
     pass
 
+def analyze_with_rule(txt):
+    user_intention = UserIntention()
+    user_intention.user_prompt = txt
+    is_certain = False
+
+    if '调用插件' in txt:
+        is_certain = True
+        user_intention.intention_type = 'ExecutePlugin'
+
+    if '修改配置' in txt:
+        is_certain = True
+        user_intention.intention_type = 'ModifyConfiguration'
+
+    return is_certain, user_intention
+
 @CatchException
 def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
     """
@@ -63,23 +71,27 @@ def 自动终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     chatbot._cookies['vt_state'] = pickle.dumps(state)
 
     # ⭐ ⭐ ⭐ analyze the user's intention
-    yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0)
-    gpt_json_io = GptJsonIO(UserIntention)
-    inputs = "Analyze the intention of the user according to following user input: \n\n" + txt + '\n\n' + gpt_json_io.format_instructions
-    run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(
-        inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
-    user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn)
+    is_certain, user_intention = analyze_with_rule(txt)
+    if not is_certain:
+        yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0)
+        gpt_json_io = GptJsonIO(UserIntention)
+        inputs = "Analyze the intention of the user according to following user input: \n\n" + txt + '\n\n' + gpt_json_io.format_instructions
+        run_gpt_fn = lambda inputs, sys_prompt: predict_no_ui_long_connection(
+            inputs=inputs, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=[])
+        user_intention = gpt_json_io.generate_output_auto_repair(run_gpt_fn(inputs, ""), run_gpt_fn)
+    else:
+        pass
 
     # user intention: modify this project's configuration
-    if user_intention.intention_type == IntentionEnum.ModifyConfiguration:
+    if user_intention.intention_type == 'ModifyConfiguration':
         yield from modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention)
 
     # user intention: dispatch a plugin
-    if user_intention.intention_type == IntentionEnum.ExecutePlugin:
+    if user_intention.intention_type == 'ExecutePlugin':
         yield from execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention)
 
     # user intention: chat
-    if user_intention.intention_type == IntentionEnum.Chat:
+    if user_intention.intention_type == 'Chat':
         yield from chat(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention)
 
     # update_vt_state()
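The rewritten intention analysis is rules-first: cheap keyword matching runs before anything else, and the LLM-based classifier is only consulted when the rules are not certain, which saves a model call on unambiguous input. A short sketch of that control flow with the classifier stubbed out (hypothetical names, illustration only):

    def classify_by_rule(txt: str):
        # returns (is_certain, intention_type); mirrors analyze_with_rule above
        if '调用插件' in txt:
            return True, 'ExecutePlugin'
        if '修改配置' in txt:
            return True, 'ModifyConfiguration'
        return False, 'Chat'

    def classify(txt: str, llm_classify) -> str:
        is_certain, intention = classify_by_rule(txt)
        if not is_certain:
            intention = llm_classify(txt)   # only pay for a model call on ambiguous input
        return intention

    calls = []
    stub_llm_classify = lambda txt: (calls.append(txt) or 'Chat')
    print(classify("调用插件 总结这个PDF", stub_llm_classify))        # -> ExecutePlugin, no stub call made
    print(classify("今天天气如何", stub_llm_classify), len(calls))    # -> Chat 1 (one stub call)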