combine qwen model family

This commit is contained in:
binary-husky
2023-12-04 10:30:02 +08:00
Parent 2cef81abbe
Commit 0cd3274d04
3 changed files with 17 additions and 19 deletions

View file

@@ -15,13 +15,13 @@ API_KEY = "Fill in your API key here"    # multiple API keys can be filled in at once, separated by English commas
 USE_PROXY = False
 if USE_PROXY:
     """
     Proxy network address. Open your proxy software to check the proxy protocol (socks5h / http), address (localhost) and port (11284).
     The format is [protocol]://[address]:[port]. Before filling this in, don't forget to set USE_PROXY to True; if deploying directly on an overseas server, leave this section unchanged.
             <Configuration tutorial & video tutorial> https://github.com/binary-husky/gpt_academic/issues/1
     [protocol] Common protocols are just socks5h/http; for example, the default local protocol of v2**y and ss* is socks5h, while the default local protocol of cl**h is http
-    [address] If you know, you know; if not, fill in localhost or 127.0.0.1 and you can't go wrong (localhost means the proxy software is installed on this machine)
+    [address] Fill in localhost or 127.0.0.1 (localhost means the proxy software is installed on this machine)
     [port] Find it in the proxy software's settings. Different proxy software has different interfaces, but the port number should be in the most prominent place.
     """
     # Proxy network address. Open your proxy / circumvention software to check the protocol (socks5h / http), address (localhost) and port (11284)
     proxies = {
         #          [protocol]://  [address]  :[port]
         "http":  "socks5h://localhost:11284",  # another example: "http":  "http://127.0.0.1:7890",
@@ -100,6 +100,10 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-prev
 MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
+# Select the local Qwen model variant (only takes effect when AVAIL_LLM_MODELS contains the corresponding local model)
+QWEN_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"
 # Baidu Qianfan (LLM_MODEL="qianfan")
 BAIDU_CLOUD_API_KEY = ''
 BAIDU_CLOUD_SECRET_KEY = ''
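As the comment above notes, the new QWEN_MODEL_SELECTION key only takes effect when a local Qwen model is enabled in AVAIL_LLM_MODELS. A hedged sketch of how the two settings could be combined in config.py; the "qwen-local" entry name is an assumption, not taken from this diff, so use whatever identifier your version of AVAIL_LLM_MODELS actually accepts:

    # Enable a locally-run Qwen model (the "qwen-local" entry name below is illustrative)
    AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106", "gpt-4-1106-preview", "qwen-local"]

    # Choose which Qwen checkpoint that local entry should load
    QWEN_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"   # or e.g. "Qwen/Qwen-7B-Chat" for the larger variant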

View file

@@ -1,13 +1,7 @@
model_name = "Qwen-7B"
model_name = "Qwen"
cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"
from transformers import AutoModel, AutoTokenizer
import time
import threading
import importlib
from toolbox import update_ui, get_conf, ProxyNetworkActivate
from multiprocessing import Process, Pipe
from toolbox import ProxyNetworkActivate, get_conf
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
@@ -24,16 +18,14 @@ class GetQwenLMHandle(LocalLLMHandle):
     def load_model_and_tokenizer(self):
         # 🏃‍♂️🏃‍♂️🏃‍♂️ executed in the child process
-        import os, glob
+        import os
         import platform
-        from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
+        # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
+        from transformers import AutoModelForCausalLM, AutoTokenizer
+        from transformers.generation import GenerationConfig
         with ProxyNetworkActivate('Download_LLM'):
-            model_id = 'qwen/Qwen-7B-Chat'   # change the path here if you have already downloaded the model; also, don't forget the tokenizer
-            self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True)
+            model_id = get_conf('QWEN_MODEL_SELECTION')   # change the path here if you have already downloaded the model; also, don't forget the tokenizer
+            self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
             # use fp16
-            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
+            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
             model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # different generation lengths, top_p and other hyper-parameters can be specified here
             self._model = model
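The net effect of this hunk: the checkpoint is chosen via the QWEN_MODEL_SELECTION config key instead of being hard-coded to qwen/Qwen-7B-Chat, and loading goes through transformers rather than modelscope. A minimal standalone sketch of the same loading pattern, outside the LocalLLMHandle machinery; the model id and prompt are illustrative:

    from transformers import AutoModelForCausalLM, AutoTokenizer
    from transformers.generation import GenerationConfig

    # Illustrative value; in gpt_academic this comes from get_conf('QWEN_MODEL_SELECTION')
    model_id = "Qwen/Qwen-1_8B-Chat-Int8"

    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
    model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)

    # Qwen's remote code provides a chat() helper that returns (response, updated history)
    response, history = model.chat(tokenizer, "Hello, who are you?", history=None)
    print(response)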

View file

@@ -1,2 +1,4 @@
 modelscope
 transformers_stream_generator
+auto-gptq
+optimum
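The two added packages are there because the GPTQ-quantized Qwen checkpoints (such as the default Qwen/Qwen-1_8B-Chat-Int8 selected above) need auto-gptq and optimum at load time. A small sanity check that the environment satisfies this requirements file; note that the pip package auto-gptq imports as auto_gptq, which is worth verifying against your installed version:

    import importlib.util

    # Check each requirement under the module name it is imported as
    for module in ("modelscope", "transformers_stream_generator", "auto_gptq", "optimum"):
        found = importlib.util.find_spec(module) is not None
        print(f"{module}: {'ok' if found else 'MISSING'}")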