Rebase v3.0

This commit is contained in:
qingxu fu
2023-04-15 15:24:18 +08:00
Parent ea6541c114
Commit 91609d6d39
17 changed files with 397 additions and 118 deletions

View file

@@ -1,35 +1,53 @@
# How to use other large language models (v3.0 branch, under testing)
## 1. Run text-generation first
## ChatGLM
- Install the dependencies: `pip install -r request_llm/requirements_chatglm.txt`
- Modify the configuration: in config.py, change the value of LLM_MODEL to "chatglm"
``` sh
# Download the model (text-generation is such a great project, don't forget to give it a star)
LLM_MODEL = "chatglm"
```
- Run! (a quick check that the ChatGLM weights load locally is sketched below)
``` sh
python main.py
```
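If you want to confirm that the ChatGLM weights actually load on your machine before wiring them into the app, the following minimal sketch mirrors what the bridge code added in this commit does (model name THUDM/chatglm-6b, CPU branch; treat the prompt text as a placeholder):
``` python
# Minimal local check, assuming requirements_chatglm.txt is installed.
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()  # .half().cuda() on GPU
model = model.eval()
response = ""
for response, history in model.stream_chat(tokenizer, "Hello, who are you?", history=[]):
    pass  # the bridge streams these partial chunks into the UI
print(response)
```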
---
## Text-Generation-UI (TGUI)
### 1. Deploy TGUI
``` sh
# 1 Clone the project
git clone https://github.com/oobabooga/text-generation-webui.git
# Install text-generation's extra dependencies
pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers
# Change directory
# 2 The latest code in this repo is broken; roll back to a commit from a few weeks ago
git reset --hard fcda3f87767e642d1c0411776e549e1d3894843d
# 3 Change directory
cd text-generation-webui
# Download the model
# 4 Install text-generation's extra dependencies
pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers
# 5 Download the model
python download-model.py facebook/galactica-1.3b
# Other options include facebook/opt-1.3b
# facebook/galactica-1.3b
# facebook/galactica-6.7b
# facebook/galactica-120b
# facebook/pygmalion-1.3b, and so on
# See https://github.com/oobabooga/text-generation-webui for details
# Start text-generation; note that the slash in the model name is replaced with an underscore
python server.py --cpu --listen --listen-port 7860 --model facebook_galactica-1.3b
# 6 Start text-generation
python server.py --cpu --listen --listen-port 7865 --model facebook_galactica-1.3b
```
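Before editing config.py, it can be worth confirming that the TGUI server above is actually listening. The following is just a generic TCP reachability check, not part of this project; the host and port assume the --listen-port 7865 used above:
``` python
# Generic port check; adjust host/port if TGUI was started differently.
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.settimeout(3)
    reachable = (s.connect_ex(("localhost", 7865)) == 0)
print("TGUI is reachable" if reachable else "TGUI is not reachable on port 7865")
```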
## 2. Modify config.py
### 2. Modify config.py
``` sh
# The LLM_MODEL format is a bit involved: TGUI:[model]@[ws address]:[ws port]; the port must match the one given above
LLM_MODEL = "TGUI:galactica-1.3b@localhost:7860"
# LLM_MODEL format: tgui:[model]@[ws address]:[ws port]; the port must match the one given above
LLM_MODEL = "tgui:galactica-1.3b@localhost:7860"
```
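For reference, the bridge code added in this commit splits this string on '@' and ':' to recover the model name, address and port, so the port here has to match the --listen-port passed to server.py. A small sketch of that parsing:
``` python
# Mirrors the parsing in the TGUI bridge; the value is an example, not a recommendation.
llm_model = "tgui:galactica-1.3b@localhost:7865"
model_name, addr_port = llm_model.split('@')   # "tgui:galactica-1.3b", "localhost:7865"
assert ':' in addr_port, "LLM_MODEL format is incorrect!" + llm_model
addr, port = addr_port.split(':')              # "localhost", "7865"
print(model_name, addr, port)
```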
## 3. Run!
### 3. Run!
``` sh
cd chatgpt-academic
python main.py
```

request_llm/bridge_all.py (new file, +135)
View file

@@ -0,0 +1,135 @@
"""
该文件中主要包含2个函数
不具备多线程能力的函数:
1. predict: 正常对话时使用,具备完备的交互功能,不可多线程
具备多线程调用能力的函数
2. predict_no_ui_long_connection在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程
"""
from concurrent.futures import ThreadPoolExecutor
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from .bridge_chatgpt import predict as chatgpt_ui
from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
from .bridge_chatglm import predict as chatglm_ui
from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
from .bridge_tgui import predict as tgui_ui
methods = {
"openai-no-ui": chatgpt_noui,
"openai-ui": chatgpt_ui,
"chatglm-no-ui": chatglm_noui,
"chatglm-ui": chatglm_ui,
"tgui-no-ui": tgui_noui,
"tgui-ui": tgui_ui,
}
def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
    """
    Send a request to the LLM and wait for the complete reply; intermediate output is not shown,
    but internally the reply is still streamed so the connection does not get cut off halfway.
    inputs:
        the input of this query
    sys_prompt:
        the silent system prompt
    llm_kwargs:
        internal tuning parameters of the LLM
    history:
        the list of previous conversation turns
    observe_window = None:
        used to pass the already-generated output across threads; most of the time it only serves a
        fancy visual effect and can be left empty. observe_window[0]: observation window.
        observe_window[1]: watchdog.
    """
    import threading, time, copy
    model = llm_kwargs['llm_model']
    n_model = 1
    if '&' not in model:
        assert not model.startswith("tgui"), "TGUI does not support function plugins"

        # query a single large language model
        if model.startswith('gpt'):
            method = methods['openai-no-ui']
        elif model == 'chatglm':
            method = methods['chatglm-no-ui']
        elif model.startswith('tgui'):
            method = methods['tgui-no-ui']
        return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
    else:
        # query several large language models at the same time:
        executor = ThreadPoolExecutor(max_workers=16)
        models = model.split('&')
        n_model = len(models)

        window_len = len(observe_window)
        if window_len==0:
            window_mutex = [[] for _ in range(n_model)] + [True]
        elif window_len==1:
            window_mutex = [[""] for _ in range(n_model)] + [True]
        elif window_len==2:
            window_mutex = [["", time.time()] for _ in range(n_model)] + [True]
        futures = []
        for i in range(n_model):
            model = models[i]
            if model.startswith('gpt'):
                method = methods['openai-no-ui']
            elif model == 'chatglm':
                method = methods['chatglm-no-ui']
            elif model.startswith('tgui'):
                method = methods['tgui-no-ui']
            llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
            llm_kwargs_feedin['llm_model'] = model
            future = executor.submit(method, inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
            futures.append(future)

        def mutex_manager(window_mutex, observe_window):
            while True:
                time.sleep(0.2)
                if not window_mutex[-1]: break
                # watchdog: forward the heartbeat to every model's sub-window
                for i in range(n_model):
                    window_mutex[i][1] = observe_window[1]
                # observation window: merge the partial output of every model
                chat_string = []
                for i in range(n_model):
                    chat_string.append( f"[{str(models[i])} says]: {window_mutex[i][0]}" )
                res = '\n\n---\n\n'.join(chat_string)
                observe_window[0] = res

        t_model = threading.Thread(target=mutex_manager, args=(window_mutex, observe_window), daemon=True)
        t_model.start()

        return_string_collect = []
        for i, future in enumerate(futures):  # wait and get
            return_string_collect.append( f"[{str(models[i])} says]: {future.result()}" )

        window_mutex[-1] = False  # stop mutex thread
        res = '\n\n---\n\n'.join(return_string_collect)
        return res

def predict(inputs, llm_kwargs, *args, **kwargs):
    """
    Send a request to the LLM and fetch the output as a stream.
    Used for the basic chat feature.
    inputs is the input of this query
    top_p, temperature are internal tuning parameters of the LLM
    history is the list of previous conversation turns; note that if either inputs or history is too long,
        a token-overflow error will be triggered
    chatbot is the conversation list shown in the WebUI; modify it and then yield, and the chat interface
        is updated directly
    additional_fn indicates which button was clicked; the buttons are defined in functional.py
    """
    if llm_kwargs['llm_model'].startswith('gpt'):
        method = methods['openai-ui']
    elif llm_kwargs['llm_model'] == 'chatglm':
        method = methods['chatglm-ui']
    elif llm_kwargs['llm_model'].startswith('tgui'):
        method = methods['tgui-ui']
    yield from method(inputs, llm_kwargs, *args, **kwargs)
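For context, a hypothetical caller of the dispatcher above might look like the sketch below (not part of this commit). The '&' separator, the observe_window layout and the returned format follow the code above; the llm_kwargs keys other than llm_model are assumptions based on the other bridges:
``` python
# Hypothetical usage sketch for predict_no_ui_long_connection (not project code).
import threading, time
from request_llm.bridge_all import predict_no_ui_long_connection

llm_kwargs = {
    'llm_model': 'gpt-3.5-turbo&chatglm',    # two backends joined with '&'
    'top_p': 1.0, 'temperature': 1.0, 'max_length': 2048,   # assumed keys, see the chatglm bridge
}
observe_window = ["", time.time()]   # [0]: partial output, [1]: watchdog heartbeat

def feed_watchdog():
    # in the real app the UI thread refreshes the heartbeat; here a helper thread does it
    while True:
        observe_window[1] = time.time()
        time.sleep(1)

threading.Thread(target=feed_watchdog, daemon=True).start()
result = predict_no_ui_long_connection(
    inputs="Summarize the following paragraph ...",
    llm_kwargs=llm_kwargs,
    history=[],
    sys_prompt="You are a helpful assistant.",
    observe_window=observe_window,
)
print(result)   # "[gpt-3.5-turbo says]: ...\n\n---\n\n[chatglm says]: ..."
```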

request_llm/bridge_chatglm.py
View file

@@ -0,0 +1,83 @@
from transformers import AutoModel, AutoTokenizer
import time
import importlib
from toolbox import update_ui, get_conf

global chatglm_model, chatglm_tokenizer
chatglm_model = None
chatglm_tokenizer = None

def model_loader():
    global chatglm_model, chatglm_tokenizer
    if chatglm_tokenizer is None:
        chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
    if chatglm_model is None:  # not loaded yet
        device, = get_conf('LOCAL_MODEL_DEVICE')
        if device=='cpu':
            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
        else:
            chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
        chatglm_model = chatglm_model.eval()

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
    """
    See request_llm/bridge_all.py for the documentation of this function.
    """
    global chatglm_model, chatglm_tokenizer
    if chatglm_model is None:
        observe_window[0] = "ChatGLM has not been loaded yet; loading takes a while ..."
        model_loader()

    # chatglm has no sys_prompt interface, so the prompt is folded into the history instead
    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append(["What can I do?", sys_prompt] )
        history_feedin.append([history[2*i], history[2*i+1]] )

    watch_dog_patience = 5  # patience of the watchdog; 5 seconds is enough
    response = ""
    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
                                                       top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        # observation window: publish the text received so far
        observe_window[0] = response
        # watchdog: abort if it has not been fed within the time limit
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience:
                raise RuntimeError("Program terminated.")
        # if not console_slience:
        #     print(response)
    return response

def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
    """
    See request_llm/bridge_all.py for the documentation of this function.
    """
    global chatglm_model, chatglm_tokenizer
    chatbot.append((inputs, ""))
    if chatglm_model is None:
        chatbot[-1] = (inputs, "ChatGLM has not been loaded yet; loading takes a while ...")
        yield from update_ui(chatbot=chatbot, history=[])
        model_loader()

    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompts
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # fetch the pre-processing function (if any)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append(["What can I do?", system_prompt] )
        history_feedin.append([history[2*i], history[2*i+1]] )

    for response, history in chatglm_model.stream_chat(chatglm_tokenizer, inputs, history=history_feedin, max_length=llm_kwargs['max_length'],
                                                       top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)
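model_loader() above decides between CPU and GPU by reading LOCAL_MODEL_DEVICE through get_conf, so the device choice belongs in config.py. A sketch of the corresponding entry (the actual default in config.py is not shown in this diff, so the value is an assumption):
``` python
# In config.py (sketch): model_loader() only distinguishes "cpu" from everything else.
LOCAL_MODEL_DEVICE = "cpu"   # anything other than "cpu" takes the .half().cuda() branch
```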

request_llm/bridge_tgui.py
View file

@@ -13,23 +13,18 @@ import time
import threading
import importlib
from toolbox import get_conf, update_ui
LLM_MODEL, = get_conf('LLM_MODEL')
# "TGUI:galactica-1.3b@localhost:7860"
model_name, addr_port = LLM_MODEL.split('@')
assert ':' in addr_port, "LLM_MODEL format is incorrect!" + LLM_MODEL
addr, port = addr_port.split(':')
def random_hash():
letters = string.ascii_lowercase + string.digits
return ''.join(random.choice(letters) for i in range(9))
async def run(context, max_token=512):
async def run(context, max_token, temperature, top_p, addr, port):
params = {
'max_new_tokens': max_token,
'do_sample': True,
'temperature': 0.5,
'top_p': 0.9,
'temperature': temperature,
'top_p': top_p,
'typical_p': 1,
'repetition_penalty': 1.05,
'encoder_repetition_penalty': 1.0,
@@ -90,7 +85,7 @@ async def run(context, max_token=512):
def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
Send the request to the model and fetch the output as a stream.
Used for the basic chat feature.
@@ -108,18 +103,26 @@ def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt=
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
raw_input = "What I would like to say is the following: " + inputs
logging.info(f'[raw_input] {raw_input}')
history.extend([inputs, ""])
chatbot.append([inputs, ""])
yield from update_ui(chatbot=chatbot, history=history, msg="Waiting for response") # refresh the UI
prompt = inputs
prompt = raw_input
tgui_say = ""
model_name, addr_port = llm_kwargs['llm_model'].split('@')
assert ':' in addr_port, "LLM_MODEL format is incorrect!" + llm_kwargs['llm_model']
addr, port = addr_port.split(':')
mutable = ["", time.time()]
def run_coorotine(mutable):
async def get_result(mutable):
async for response in run(prompt):
# "tgui:galactica-1.3b@localhost:7860"
async for response in run(context=prompt, max_token=llm_kwargs['max_length'],
temperature=llm_kwargs['temperature'],
top_p=llm_kwargs['top_p'], addr=addr, port=port):
print(response[len(mutable[0]):])
mutable[0] = response
if (time.time() - mutable[1]) > 3:
@@ -140,28 +143,29 @@ def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt=
chatbot[-1] = (history[-2], history[-1])
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
logging.info(f'[response] {tgui_say}')
def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""):
def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
raw_input = "What I would like to say is the following: " + inputs
prompt = inputs
prompt = raw_input
tgui_say = ""
mutable = ["", time.time()]
def run_coorotine(mutable):
async def get_result(mutable):
async for response in run(prompt, max_token=20):
print(response[len(mutable[0]):])
mutable[0] = response
if (time.time() - mutable[1]) > 3:
model_name, addr_port = llm_kwargs['llm_model'].split('@')
assert ':' in addr_port, "LLM_MODEL format is incorrect!" + llm_kwargs['llm_model']
addr, port = addr_port.split(':')
def run_coorotine(observe_window):
async def get_result(observe_window):
async for response in run(context=prompt, max_token=llm_kwargs['max_length'],
temperature=llm_kwargs['temperature'],
top_p=llm_kwargs['top_p'], addr=addr, port=port):
print(response[len(observe_window[0]):])
observe_window[0] = response
if (time.time() - observe_window[1]) > 5:
print('exit when no listener')
break
asyncio.run(get_result(mutable))
thread_listen = threading.Thread(target=run_coorotine, args=(mutable,))
asyncio.run(get_result(observe_window))
thread_listen = threading.Thread(target=run_coorotine, args=(observe_window,))
thread_listen.start()
while thread_listen.is_alive():
time.sleep(1)
mutable[1] = time.time()
tgui_say = mutable[0]
return tgui_say
return observe_window[0]
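The reworked predict_no_ui_long_connection above follows the same observe_window convention as the other bridges: index 0 accumulates the streamed text and index 1 is a heartbeat that the caller keeps refreshing, otherwise the stream bails out after about 5 seconds. A standalone illustration of that pattern (plain Python, not project code):
``` python
# Standalone illustration of the observe_window / watchdog convention (not project code).
import threading, time

observe_window = ["", time.time()]   # [0]: streamed text, [1]: heartbeat timestamp

def consumer(window):
    # stand-in for the async streaming loop in bridge_tgui.py
    for chunk in ["partial ", "output ", "arrives ", "here"]:
        window[0] += chunk
        if (time.time() - window[1]) > 5:
            print('exit when no listener')   # same bail-out as the bridge
            break
        time.sleep(1)

t = threading.Thread(target=consumer, args=(observe_window,))
t.start()
while t.is_alive():
    time.sleep(1)
    observe_window[1] = time.time()   # feed the watchdog, as the caller loop above does
print(observe_window[0])
```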

request_llm/requirements_chatglm.txt
View file

@@ -0,0 +1,6 @@
protobuf
transformers==4.27.1
cpm_kernels
torch>=1.10
mdtex2html
sentencepiece